Dataset columns:

| Column | Dtype | Range |
| --- | --- | --- |
| repo_name | string | 6 distinct values |
| pr_number | int64 | 512 to 78.9k |
| pr_title | string | 3 to 144 characters |
| pr_description | string | 0 to 30.3k characters |
| author | string | 2 to 21 characters |
| date_created | timestamp[ns, tz=UTC] | |
| date_merged | timestamp[ns, tz=UTC] | |
| previous_commit | string | 40 characters |
| pr_commit | string | 40 characters |
| query | string | 17 to 30.4k characters |
| filepath | string | 9 to 210 characters |
| before_content | string | 0 to 112M characters |
| after_content | string | 0 to 112M characters |
| label | int64 | -1 to 1 |
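Each row pairs one PR (its metadata plus a `query` built from the title and description) with one repository file, that file's contents at `previous_commit` and `pr_commit`, and an integer `label`. Below is a minimal sketch of loading and inspecting a dataset with this schema using the Hugging Face `datasets` library; the dataset identifier and split name are placeholders, not values taken from this page:

```python
from datasets import load_dataset

# Placeholder identifier/split -- substitute the real ones for this dataset.
ds = load_dataset("org/pr-file-dataset", split="train")

print(ds.column_names)  # repo_name, pr_number, ..., before_content, after_content, label
row = ds[0]
print(row["repo_name"], row["pr_number"], row["filepath"], row["label"])

# Rows whose file content actually differs between previous_commit and pr_commit.
changed = ds.filter(lambda r: r["before_content"] != r["after_content"])
print(f"{len(changed)} of {len(ds)} rows have modified file content")
```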
repo_name: dotnet/runtime
pr_number: 66208
pr_title: JIT: add OSR patchpoint strategy, inhibit tail duplication
pr_description: Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
author: AndyAyersMS
date_created: 2022-03-04T19:22:14Z
date_merged: 2022-03-06T16:26:44Z
previous_commit: 6fce82cb7b111ba9a2547b70b4592cea98ae314e
pr_commit: f9da3db92420d15b5bba283a44271cd81d83ad1a
query: JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
filepath: ./src/coreclr/debug/di/arm/floatconversion.S
before_content: // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <unixasmmacros.inc> // Arguments // input: (in R0) the adress of the ULONGLONG to be converted to a double // output: the double corresponding to the ULONGLONG input value LEAF_ENTRY FPFillR8, .TEXT .thumb vldr D0, [R0] bx lr LEAF_END FPFillR8, .TEXT
after_content: // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <unixasmmacros.inc> // Arguments // input: (in R0) the adress of the ULONGLONG to be converted to a double // output: the double corresponding to the ULONGLONG input value LEAF_ENTRY FPFillR8, .TEXT .thumb vldr D0, [R0] bx lr LEAF_END FPFillR8, .TEXT
label: -1
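The PR description above outlines an adaptive placement rule: put OSR patchpoints at backedge sources when that is feasible and there are not too many of them, otherwise fall back to placing them at backedge targets. A minimal illustrative sketch of that rule follows (it is not the actual RyuJIT implementation; the threshold and the data shapes are assumptions made for this example):

```python
MAX_SOURCE_PATCHPOINTS = 3  # assumed cutoff, not taken from the PR

def place_patchpoints(backedges):
    """Choose patchpoint locations for one loop.

    backedges: list of (source_block, target_block) pairs.
    Returns a list of ("source" | "target", block) placements.
    """
    sources = {src for src, _ in backedges}
    targets = {tgt for _, tgt in backedges}
    if len(sources) <= MAX_SOURCE_PATCHPOINTS:
        # Few backedges: a patchpoint at each backedge source.
        return [("source", blk) for blk in sorted(sources)]
    # Many backedges: fall back to patchpoints at the backedge targets.
    return [("target", blk) for blk in sorted(targets)]

# Two backedges into the loop head BB01 -> patchpoints at both sources.
print(place_patchpoints([("BB03", "BB01"), ("BB07", "BB01")]))
```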
repo_name: dotnet/runtime
pr_number: 66208
pr_title: JIT: add OSR patchpoint strategy, inhibit tail duplication
pr_description: Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
author: AndyAyersMS
date_created: 2022-03-04T19:22:14Z
date_merged: 2022-03-06T16:26:44Z
previous_commit: 6fce82cb7b111ba9a2547b70b4592cea98ae314e
pr_commit: f9da3db92420d15b5bba283a44271cd81d83ad1a
query: JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
filepath: ./src/tests/JIT/SIMD/AddingSequence_r.csproj
before_content: <Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize /> </PropertyGroup> <ItemGroup> <Compile Include="AddingSequence.cs" /> </ItemGroup> </Project>
after_content: <Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize /> </PropertyGroup> <ItemGroup> <Compile Include="AddingSequence.cs" /> </ItemGroup> </Project>
label: -1
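Both example rows so far carry label -1 and identical before/after contents. Assuming, and this is my reading rather than anything stated in the schema, that label 1 marks a file the PR actually modified and -1 marks one it did not, a row can be turned into a (query, document, relevance) triple for a retrieval or ranking model. A hypothetical helper:

```python
def to_retrieval_example(row):
    """Turn one dataset row into a (query, document, relevance) triple.

    Assumes label == 1 means the file was changed by the PR and label == -1
    means it was not -- an interpretation, not a documented fact.
    """
    query = row["query"]  # PR title + description
    document = row["filepath"] + "\n" + row["before_content"]
    relevance = 1 if row["label"] == 1 else 0
    return query, document, relevance

example = {
    "query": "JIT: add OSR patchpoint strategy, inhibit tail duplication. ...",
    "filepath": "./src/coreclr/debug/di/arm/floatconversion.S",
    "before_content": "// Licensed to the .NET Foundation ...",
    "label": -1,
}
print(to_retrieval_example(example)[2])  # -> 0
```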
repo_name: dotnet/runtime
pr_number: 66204
pr_title: Delete `compUnsafeCastUsed`
pr_description: Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
author: SingleAccretion
date_created: 2022-03-04T18:35:50Z
date_merged: 2022-03-04T22:59:19Z
previous_commit: 136b312bc4b471c92eeaf25ded3d0b4dec3afd13
pr_commit: da51f60525f96cceef3484eaa140c3b84e7484ab
query: Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
filepath: ./src/coreclr/jit/compiler.cpp
before_content:
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif // _MSC_VER #include "hostallocator.h" #include "emit.h" #include "ssabuilder.h" #include "valuenum.h" #include "rangecheck.h" #include "lower.h" #include "stacklevelsetter.h" #include "jittelemetry.h" #include "patchpointinfo.h" #include "jitstd/algorithm.h" extern ICorJitHost* g_jitHost; #if defined(DEBUG) // Column settings for COMPlus_JitDumpIR. We could(should) make these programmable. #define COLUMN_OPCODE 30 #define COLUMN_OPERANDS (COLUMN_OPCODE + 25) #define COLUMN_KINDS 110 #define COLUMN_FLAGS (COLUMN_KINDS + 32) #endif #if defined(DEBUG) unsigned Compiler::jitTotalMethodCompiled = 0; #endif // defined(DEBUG) #if defined(DEBUG) LONG Compiler::jitNestingLevel = 0; #endif // defined(DEBUG) // static bool Compiler::s_pAltJitExcludeAssembliesListInitialized = false; AssemblyNamesList2* Compiler::s_pAltJitExcludeAssembliesList = nullptr; #ifdef DEBUG // static bool Compiler::s_pJitDisasmIncludeAssembliesListInitialized = false; AssemblyNamesList2* Compiler::s_pJitDisasmIncludeAssembliesList = nullptr; // static bool Compiler::s_pJitFunctionFileInitialized = false; MethodSet* Compiler::s_pJitMethodSet = nullptr; #endif // DEBUG #ifdef CONFIGURABLE_ARM_ABI // static bool GlobalJitOptions::compFeatureHfa = false; LONG GlobalJitOptions::compUseSoftFPConfigured = 0; #endif // CONFIGURABLE_ARM_ABI /***************************************************************************** * * Little helpers to grab the current cycle counter value; this is done * differently based on target architecture, host toolchain, etc. The * main thing is to keep the overhead absolutely minimal; in fact, on * x86/x64 we use RDTSC even though it's not thread-safe; GetThreadCycles * (which is monotonous) is just too expensive. */ #ifdef FEATURE_JIT_METHOD_PERF #if defined(HOST_X86) || defined(HOST_AMD64) #if defined(_MSC_VER) #include <intrin.h> inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) { *cycleOut = __rdtsc(); return true; } #elif defined(__GNUC__) inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) { uint32_t hi, lo; __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi)); *cycleOut = (static_cast<unsigned __int64>(hi) << 32) | static_cast<unsigned __int64>(lo); return true; } #else // neither _MSC_VER nor __GNUC__ // The following *might* work - might as well try. #define _our_GetThreadCycles(cp) GetThreadCycles(cp) #endif #elif defined(HOST_ARM) || defined(HOST_ARM64) // If this doesn't work please see ../gc/gc.cpp for additional ARM // info (and possible solutions). #define _our_GetThreadCycles(cp) GetThreadCycles(cp) #else // not x86/x64 and not ARM // Don't know what this target is, but let's give it a try; if // someone really wants to make this work, please add the right // code here. 
#define _our_GetThreadCycles(cp) GetThreadCycles(cp) #endif // which host OS const BYTE genTypeSizes[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) sz, #include "typelist.h" #undef DEF_TP }; const BYTE genTypeAlignments[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) al, #include "typelist.h" #undef DEF_TP }; const BYTE genTypeStSzs[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) st, #include "typelist.h" #undef DEF_TP }; const BYTE genActualTypes[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) jitType, #include "typelist.h" #undef DEF_TP }; #endif // FEATURE_JIT_METHOD_PERF /*****************************************************************************/ inline unsigned getCurTime() { SYSTEMTIME tim; GetSystemTime(&tim); return (((tim.wHour * 60) + tim.wMinute) * 60 + tim.wSecond) * 1000 + tim.wMilliseconds; } /*****************************************************************************/ #ifdef DEBUG /*****************************************************************************/ static FILE* jitSrcFilePtr; static unsigned jitCurSrcLine; void Compiler::JitLogEE(unsigned level, const char* fmt, ...) { va_list args; if (verbose) { va_start(args, fmt); vflogf(jitstdout, fmt, args); va_end(args); } va_start(args, fmt); vlogf(level, fmt, args); va_end(args); } #endif // DEBUG /*****************************************************************************/ #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS static unsigned genMethodCnt; // total number of methods JIT'ted unsigned genMethodICnt; // number of interruptible methods unsigned genMethodNCnt; // number of non-interruptible methods static unsigned genSmallMethodsNeedingExtraMemoryCnt = 0; #endif /*****************************************************************************/ #if MEASURE_NODE_SIZE NodeSizeStats genNodeSizeStats; NodeSizeStats genNodeSizeStatsPerFunc; unsigned genTreeNcntHistBuckets[] = {10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000, 5000, 10000, 0}; Histogram genTreeNcntHist(genTreeNcntHistBuckets); unsigned genTreeNsizHistBuckets[] = {1000, 5000, 10000, 50000, 100000, 500000, 1000000, 0}; Histogram genTreeNsizHist(genTreeNsizHistBuckets); #endif // MEASURE_NODE_SIZE /*****************************************************************************/ #if MEASURE_MEM_ALLOC unsigned memAllocHistBuckets[] = {64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; Histogram memAllocHist(memAllocHistBuckets); unsigned memUsedHistBuckets[] = {16, 32, 64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; Histogram memUsedHist(memUsedHistBuckets); #endif // MEASURE_MEM_ALLOC /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES size_t grossVMsize; // Total IL code size size_t grossNCsize; // Native code + data size size_t totalNCsize; // Native code + data + GC info size (TODO-Cleanup: GC info size only accurate for JIT32_GCENCODER) size_t gcHeaderISize; // GC header size: interruptible methods size_t gcPtrMapISize; // GC pointer map size: interruptible methods size_t gcHeaderNSize; // GC header size: non-interruptible methods size_t gcPtrMapNSize; // GC pointer map size: non-interruptible methods #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of argument counts. 
*/ #if CALL_ARG_STATS unsigned argTotalCalls; unsigned argHelperCalls; unsigned argStaticCalls; unsigned argNonVirtualCalls; unsigned argVirtualCalls; unsigned argTotalArgs; // total number of args for all calls (including objectPtr) unsigned argTotalDWordArgs; unsigned argTotalLongArgs; unsigned argTotalFloatArgs; unsigned argTotalDoubleArgs; unsigned argTotalRegArgs; unsigned argTotalTemps; unsigned argTotalLclVar; unsigned argTotalDeferred; unsigned argTotalConst; unsigned argTotalObjPtr; unsigned argTotalGTF_ASGinArgs; unsigned argMaxTempsPerMethod; unsigned argCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argCntTable(argCntBuckets); unsigned argDWordCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argDWordCntTable(argDWordCntBuckets); unsigned argDWordLngCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argDWordLngCntTable(argDWordLngCntBuckets); unsigned argTempsCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argTempsCntTable(argTempsCntBuckets); #endif // CALL_ARG_STATS /***************************************************************************** * * Variables to keep track of basic block counts. */ #if COUNT_BASIC_BLOCKS // -------------------------------------------------- // Basic block count frequency table: // -------------------------------------------------- // <= 1 ===> 26872 count ( 56% of total) // 2 .. 2 ===> 669 count ( 58% of total) // 3 .. 3 ===> 4687 count ( 68% of total) // 4 .. 5 ===> 5101 count ( 78% of total) // 6 .. 10 ===> 5575 count ( 90% of total) // 11 .. 20 ===> 3028 count ( 97% of total) // 21 .. 50 ===> 1108 count ( 99% of total) // 51 .. 100 ===> 182 count ( 99% of total) // 101 .. 1000 ===> 34 count (100% of total) // 1001 .. 10000 ===> 0 count (100% of total) // -------------------------------------------------- unsigned bbCntBuckets[] = {1, 2, 3, 5, 10, 20, 50, 100, 1000, 10000, 0}; Histogram bbCntTable(bbCntBuckets); /* Histogram for the IL opcode size of methods with a single basic block */ unsigned bbSizeBuckets[] = {1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 0}; Histogram bbOneBBSizeTable(bbSizeBuckets); #endif // COUNT_BASIC_BLOCKS /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
exit conditions * - number of loops that have an iterator (for like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS unsigned totalLoopMethods; // counts the total number of methods that have natural loops unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent unsigned totalLoopCount; // counts the total number of natural loops unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent unsigned iterLoopCount; // counts the # of loops with an iterator (for like) unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < const) unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like) bool hasMethodLoops; // flag to keep track if we already counted a method as having loops unsigned loopsThisMethod; // counts the number of loops in the current method bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. /* Histogram for number of loops in a method */ unsigned loopCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0}; Histogram loopCountTable(loopCountBuckets); /* Histogram for number of loop exits */ unsigned loopExitCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 0}; Histogram loopExitCountTable(loopExitCountBuckets); #endif // COUNT_LOOPS //------------------------------------------------------------------------ // getJitGCType: Given the VM's CorInfoGCType convert it to the JIT's var_types // // Arguments: // gcType - an enum value that originally came from an element // of the BYTE[] returned from getClassGClayout() // // Return Value: // The corresponsing enum value from the JIT's var_types // // Notes: // The gcLayout of each field of a struct is returned from getClassGClayout() // as a BYTE[] but each BYTE element is actually a CorInfoGCType value // Note when we 'know' that there is only one element in theis array // the JIT will often pass the address of a single BYTE, instead of a BYTE[] // var_types Compiler::getJitGCType(BYTE gcType) { var_types result = TYP_UNKNOWN; CorInfoGCType corInfoType = (CorInfoGCType)gcType; if (corInfoType == TYPE_GC_NONE) { result = TYP_I_IMPL; } else if (corInfoType == TYPE_GC_REF) { result = TYP_REF; } else if (corInfoType == TYPE_GC_BYREF) { result = TYP_BYREF; } else { noway_assert(!"Bad value of 'gcType'"); } return result; } #ifdef TARGET_X86 //--------------------------------------------------------------------------- // isTrivialPointerSizedStruct: // Check if the given struct type contains only one pointer-sized integer value type // // Arguments: // clsHnd - the handle for the struct type. // // Return Value: // true if the given struct type contains only one pointer-sized integer value type, // false otherwise. 
// bool Compiler::isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const { assert(info.compCompHnd->isValueClass(clsHnd)); if (info.compCompHnd->getClassSize(clsHnd) != TARGET_POINTER_SIZE) { return false; } for (;;) { // all of class chain must be of value type and must have only one field if (!info.compCompHnd->isValueClass(clsHnd) || info.compCompHnd->getClassNumInstanceFields(clsHnd) != 1) { return false; } CORINFO_CLASS_HANDLE* pClsHnd = &clsHnd; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); CorInfoType fieldType = info.compCompHnd->getFieldType(fldHnd, pClsHnd); var_types vt = JITtype2varType(fieldType); if (fieldType == CORINFO_TYPE_VALUECLASS) { clsHnd = *pClsHnd; } else if (varTypeIsI(vt) && !varTypeIsGC(vt)) { return true; } else { return false; } } } #endif // TARGET_X86 //--------------------------------------------------------------------------- // isNativePrimitiveStructType: // Check if the given struct type is an intrinsic type that should be treated as though // it is not a struct at the unmanaged ABI boundary. // // Arguments: // clsHnd - the handle for the struct type. // // Return Value: // true if the given struct type should be treated as a primitive for unmanaged calls, // false otherwise. // bool Compiler::isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd) { if (!isIntrinsicType(clsHnd)) { return false; } const char* namespaceName = nullptr; const char* typeName = getClassNameFromMetadata(clsHnd, &namespaceName); if (strcmp(namespaceName, "System.Runtime.InteropServices") != 0) { return false; } return strcmp(typeName, "CLong") == 0 || strcmp(typeName, "CULong") == 0 || strcmp(typeName, "NFloat") == 0; } //----------------------------------------------------------------------------- // getPrimitiveTypeForStruct: // Get the "primitive" type that is is used for a struct // of size 'structSize'. // We examine 'clsHnd' to check the GC layout of the struct and // return TYP_REF for structs that simply wrap an object. // If the struct is a one element HFA/HVA, we will return the // proper floating point or vector type. // // Arguments: // structSize - the size of the struct type, cannot be zero // clsHnd - the handle for the struct type, used when may have // an HFA or if we need the GC layout for an object ref. // // Return Value: // The primitive type (i.e. byte, short, int, long, ref, float, double) // used to pass or return structs of this size. // If we shouldn't use a "primitive" type then TYP_UNKNOWN is returned. // Notes: // For 32-bit targets (X86/ARM32) the 64-bit TYP_LONG type is not // considered a primitive type by this method. // So a struct that wraps a 'long' is passed and returned in the // same way as any other 8-byte struct // For ARM32 if we have an HFA struct that wraps a 64-bit double // we will return TYP_DOUBLE. // For vector calling conventions, a vector is considered a "primitive" // type, as it is passed in a single register. // var_types Compiler::getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg) { assert(structSize != 0); var_types useType = TYP_UNKNOWN; // Start by determining if we have an HFA/HVA with a single element. if (GlobalJitOptions::compFeatureHfa) { // Arm64 Windows VarArg methods arguments will not classify HFA types, they will need to be treated // as if they are not HFA types. 
if (!(TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg)) { switch (structSize) { case 4: case 8: #ifdef TARGET_ARM64 case 16: #endif // TARGET_ARM64 { var_types hfaType = GetHfaType(clsHnd); // We're only interested in the case where the struct size is equal to the size of the hfaType. if (varTypeIsValidHfaType(hfaType)) { if (genTypeSize(hfaType) == structSize) { useType = hfaType; } else { return TYP_UNKNOWN; } } } } if (useType != TYP_UNKNOWN) { return useType; } } } // Now deal with non-HFA/HVA structs. switch (structSize) { case 1: useType = TYP_BYTE; break; case 2: useType = TYP_SHORT; break; #if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 3: useType = TYP_INT; break; #endif // !TARGET_XARCH || UNIX_AMD64_ABI #ifdef TARGET_64BIT case 4: // We dealt with the one-float HFA above. All other 4-byte structs are handled as INT. useType = TYP_INT; break; #if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 5: case 6: case 7: useType = TYP_I_IMPL; break; #endif // !TARGET_XARCH || UNIX_AMD64_ABI #endif // TARGET_64BIT case TARGET_POINTER_SIZE: { BYTE gcPtr = 0; // Check if this pointer-sized struct is wrapping a GC object info.compCompHnd->getClassGClayout(clsHnd, &gcPtr); useType = getJitGCType(gcPtr); } break; default: useType = TYP_UNKNOWN; break; } return useType; } //----------------------------------------------------------------------------- // getArgTypeForStruct: // Get the type that is used to pass values of the given struct type. // If you have already retrieved the struct size then it should be // passed as the optional fourth argument, as this allows us to avoid // an extra call to getClassSize(clsHnd) // // Arguments: // clsHnd - the handle for the struct type // wbPassStruct - An "out" argument with information about how // the struct is to be passed // isVarArg - is vararg, used to ignore HFA types for Arm64 windows varargs // structSize - the size of the struct type, // or zero if we should call getClassSize(clsHnd) // // Return Value: // For wbPassStruct you can pass a 'nullptr' and nothing will be written // or returned for that out parameter. // When *wbPassStruct is SPK_PrimitiveType this method's return value // is the primitive type used to pass the struct. // When *wbPassStruct is SPK_ByReference this method's return value // is always TYP_UNKNOWN and the struct type is passed by reference to a copy // When *wbPassStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value // is always TYP_STRUCT and the struct type is passed by value either // using multiple registers or on the stack. // // Assumptions: // The size must be the size of the given type. // The given class handle must be for a value type (struct). // // Notes: // About HFA types: // When the clsHnd is a one element HFA type we return the appropriate // floating point primitive type and *wbPassStruct is SPK_PrimitiveType // If there are two or more elements in the HFA type then the this method's // return value is TYP_STRUCT and *wbPassStruct is SPK_ByValueAsHfa // var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, structPassingKind* wbPassStruct, bool isVarArg, unsigned structSize) { var_types useType = TYP_UNKNOWN; structPassingKind howToPassStruct = SPK_Unknown; // We must change this before we return assert(structSize != 0); // Determine if we can pass the struct as a primitive type. // Note that on x86 we only pass specific pointer-sized structs that satisfy isTrivialPointerSizedStruct checks. 
#ifndef TARGET_X86 #ifdef UNIX_AMD64_ABI // An 8-byte struct may need to be passed in a floating point register // So we always consult the struct "Classifier" routine // SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc); if (structDesc.passedInRegisters && (structDesc.eightByteCount != 1)) { // We can't pass this as a primitive type. } else if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE) { // If this is passed as a floating type, use that. // Otherwise, we'll use the general case - we don't want to use the "EightByteType" // directly, because it returns `TYP_INT` for any integral type <= 4 bytes, and // we need to preserve small types. useType = GetEightByteType(structDesc, 0); } else #endif // UNIX_AMD64_ABI // The largest arg passed in a single register is MAX_PASS_SINGLEREG_BYTES, // so we can skip calling getPrimitiveTypeForStruct when we // have a struct that is larger than that. // if (structSize <= MAX_PASS_SINGLEREG_BYTES) { // We set the "primitive" useType based upon the structSize // and also examine the clsHnd to see if it is an HFA of count one useType = getPrimitiveTypeForStruct(structSize, clsHnd, isVarArg); } #else if (isTrivialPointerSizedStruct(clsHnd)) { useType = TYP_I_IMPL; } #endif // !TARGET_X86 // Did we change this struct type into a simple "primitive" type? // if (useType != TYP_UNKNOWN) { // Yes, we should use the "primitive" type in 'useType' howToPassStruct = SPK_PrimitiveType; } else // We can't replace the struct with a "primitive" type { // See if we can pass this struct by value, possibly in multiple registers // or if we should pass it by reference to a copy // if (structSize <= MAX_PASS_MULTIREG_BYTES) { // Structs that are HFA/HVA's are passed by value in multiple registers. // Arm64 Windows VarArg methods arguments will not classify HFA/HVA types, they will need to be treated // as if they are not HFA/HVA types. 
var_types hfaType; if (TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg) { hfaType = TYP_UNDEF; } else { hfaType = GetHfaType(clsHnd); } if (varTypeIsValidHfaType(hfaType)) { // HFA's of count one should have been handled by getPrimitiveTypeForStruct assert(GetHfaCount(clsHnd) >= 2); // setup wbPassType and useType indicate that this is passed by value as an HFA // using multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValueAsHfa; useType = TYP_STRUCT; } else // Not an HFA struct type { #ifdef UNIX_AMD64_ABI // The case of (structDesc.eightByteCount == 1) should have already been handled if ((structDesc.eightByteCount > 1) || !structDesc.passedInRegisters) { // setup wbPassType and useType indicate that this is passed by value in multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; } else { assert(structDesc.eightByteCount == 0); // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register // (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_ARM64) // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct assert(structSize > TARGET_POINTER_SIZE); // On ARM64 structs that are 9-16 bytes are passed by value in multiple registers // if (structSize <= (TARGET_POINTER_SIZE * 2)) { // setup wbPassType and useType indicate that this is passed by value in multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; } else // a structSize that is 17-32 bytes in size { // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register // (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_X86) || defined(TARGET_ARM) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getArgTypeForStruct (with FEATURE_MULTIREG_ARGS=1)"); #endif // TARGET_XXX } } else // (structSize > MAX_PASS_MULTIREG_BYTES) { // We have a (large) struct that can't be replaced with a "primitive" type // and can't be passed in multiple registers CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; #elif defined(TARGET_AMD64) || defined(TARGET_ARM64) // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getArgTypeForStruct"); #endif // TARGET_XXX } } // 'howToPassStruct' must be set to one of the valid values before we return assert(howToPassStruct != SPK_Unknown); if (wbPassStruct != nullptr) { *wbPassStruct = howToPassStruct; } return useType; } 
//----------------------------------------------------------------------------- // getReturnTypeForStruct: // Get the type that is used to return values of the given struct type. // If you have already retrieved the struct size then it should be // passed as the optional third argument, as this allows us to avoid // an extra call to getClassSize(clsHnd) // // Arguments: // clsHnd - the handle for the struct type // callConv - the calling convention of the function // that returns this struct. // wbReturnStruct - An "out" argument with information about how // the struct is to be returned // structSize - the size of the struct type, // or zero if we should call getClassSize(clsHnd) // // Return Value: // For wbReturnStruct you can pass a 'nullptr' and nothing will be written // or returned for that out parameter. // When *wbReturnStruct is SPK_PrimitiveType this method's return value // is the primitive type used to return the struct. // When *wbReturnStruct is SPK_ByReference this method's return value // is always TYP_UNKNOWN and the struct type is returned using a return buffer // When *wbReturnStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value // is always TYP_STRUCT and the struct type is returned using multiple registers. // // Assumptions: // The size must be the size of the given type. // The given class handle must be for a value type (struct). // // Notes: // About HFA types: // When the clsHnd is a one element HFA type then this method's return // value is the appropriate floating point primitive type and // *wbReturnStruct is SPK_PrimitiveType. // If there are two or more elements in the HFA type and the target supports // multireg return types then the return value is TYP_STRUCT and // *wbReturnStruct is SPK_ByValueAsHfa. // Additionally if there are two or more elements in the HFA type and // the target doesn't support multreg return types then it is treated // as if it wasn't an HFA type. // About returning TYP_STRUCT: // Whenever this method's return value is TYP_STRUCT it always means // that multiple registers are used to return this struct. // var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, CorInfoCallConvExtension callConv, structPassingKind* wbReturnStruct /* = nullptr */, unsigned structSize /* = 0 */) { var_types useType = TYP_UNKNOWN; structPassingKind howToReturnStruct = SPK_Unknown; // We must change this before we return bool canReturnInRegister = true; assert(clsHnd != NO_CLASS_HANDLE); if (structSize == 0) { structSize = info.compCompHnd->getClassSize(clsHnd); } assert(structSize > 0); #ifdef UNIX_AMD64_ABI // An 8-byte struct may need to be returned in a floating point register // So we always consult the struct "Classifier" routine // SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc); if (structDesc.eightByteCount == 1) { assert(structSize <= sizeof(double)); assert(structDesc.passedInRegisters); if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE) { // If this is returned as a floating type, use that. // Otherwise, leave as TYP_UNKONWN and we'll sort things out below. useType = GetEightByteType(structDesc, 0); howToReturnStruct = SPK_PrimitiveType; } } else { // Return classification is not always size based... 
canReturnInRegister = structDesc.passedInRegisters; if (!canReturnInRegister) { assert(structDesc.eightByteCount == 0); howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } } #elif UNIX_X86_ABI if (callConv != CorInfoCallConvExtension::Managed && !isNativePrimitiveStructType(clsHnd)) { canReturnInRegister = false; howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #endif if (TargetOS::IsWindows && !TargetArchitecture::IsArm32 && callConvIsInstanceMethodCallConv(callConv) && !isNativePrimitiveStructType(clsHnd)) { canReturnInRegister = false; howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } // Check for cases where a small struct is returned in a register // via a primitive type. // // The largest "primitive type" is MAX_PASS_SINGLEREG_BYTES // so we can skip calling getPrimitiveTypeForStruct when we // have a struct that is larger than that. if (canReturnInRegister && (useType == TYP_UNKNOWN) && (structSize <= MAX_PASS_SINGLEREG_BYTES)) { // We set the "primitive" useType based upon the structSize // and also examine the clsHnd to see if it is an HFA of count one // // The ABI for struct returns in varArg methods, is same as the normal case, // so pass false for isVararg useType = getPrimitiveTypeForStruct(structSize, clsHnd, /*isVararg=*/false); if (useType != TYP_UNKNOWN) { if (structSize == genTypeSize(useType)) { // Currently: 1, 2, 4, or 8 byte structs howToReturnStruct = SPK_PrimitiveType; } else { // Currently: 3, 5, 6, or 7 byte structs assert(structSize < genTypeSize(useType)); howToReturnStruct = SPK_EnclosingType; } } } #ifdef TARGET_64BIT // Note this handles an odd case when FEATURE_MULTIREG_RET is disabled and HFAs are enabled // // getPrimitiveTypeForStruct will return TYP_UNKNOWN for a struct that is an HFA of two floats // because when HFA are enabled, normally we would use two FP registers to pass or return it // // But if we don't have support for multiple register return types, we have to change this. // Since what we have is an 8-byte struct (float + float) we change useType to TYP_I_IMPL // so that the struct is returned instead using an 8-byte integer register. // if ((FEATURE_MULTIREG_RET == 0) && (useType == TYP_UNKNOWN) && (structSize == (2 * sizeof(float))) && IsHfa(clsHnd)) { useType = TYP_I_IMPL; howToReturnStruct = SPK_PrimitiveType; } #endif // Did we change this struct type into a simple "primitive" type? if (useType != TYP_UNKNOWN) { // If so, we should have already set howToReturnStruct, too. 
assert(howToReturnStruct != SPK_Unknown); } else if (canReturnInRegister) // We can't replace the struct with a "primitive" type { // See if we can return this struct by value, possibly in multiple registers // or if we should return it using a return buffer register // if ((FEATURE_MULTIREG_RET == 1) && (structSize <= MAX_RET_MULTIREG_BYTES)) { // Structs that are HFA's are returned in multiple registers if (IsHfa(clsHnd)) { // HFA's of count one should have been handled by getPrimitiveTypeForStruct assert(GetHfaCount(clsHnd) >= 2); // setup wbPassType and useType indicate that this is returned by value as an HFA // using multiple registers howToReturnStruct = SPK_ByValueAsHfa; useType = TYP_STRUCT; } else // Not an HFA struct type { #ifdef UNIX_AMD64_ABI // The cases of (structDesc.eightByteCount == 1) and (structDesc.eightByteCount == 0) // should have already been handled assert(structDesc.eightByteCount > 1); // setup wbPassType and useType indicate that this is returned by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; assert(structDesc.passedInRegisters == true); #elif defined(TARGET_ARM64) // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct assert(structSize > TARGET_POINTER_SIZE); // On ARM64 structs that are 9-16 bytes are returned by value in multiple registers // if (structSize <= (TARGET_POINTER_SIZE * 2)) { // setup wbPassType and useType indicate that this is return by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; } else // a structSize that is 17-32 bytes in size { // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_X86) // Only 8-byte structs are return in multiple registers. // We also only support multireg struct returns on x86 to match the native calling convention. // So return 8-byte structs only when the calling convention is a native calling convention. 
if (structSize == MAX_RET_MULTIREG_BYTES && callConv != CorInfoCallConvExtension::Managed) { // setup wbPassType and useType indicate that this is return by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; } else { // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_ARM) // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getReturnTypeForStruct (with FEATURE_MULTIREG_ARGS=1)"); #endif // TARGET_XXX } } else // (structSize > MAX_RET_MULTIREG_BYTES) || (FEATURE_MULTIREG_RET == 0) { // We have a (large) struct that can't be replaced with a "primitive" type // and can't be returned in multiple registers // We return this struct using a return buffer register // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } } // 'howToReturnStruct' must be set to one of the valid values before we return assert(howToReturnStruct != SPK_Unknown); if (wbReturnStruct != nullptr) { *wbReturnStruct = howToReturnStruct; } return useType; } /////////////////////////////////////////////////////////////////////////////// // // MEASURE_NOWAY: code to measure and rank dynamic occurrences of noway_assert. // (Just the appearances of noway_assert, whether the assert is true or false.) // This might help characterize the cost of noway_assert in non-DEBUG builds, // or determine which noway_assert should be simple DEBUG-only asserts. // /////////////////////////////////////////////////////////////////////////////// #if MEASURE_NOWAY struct FileLine { char* m_file; unsigned m_line; char* m_condStr; FileLine() : m_file(nullptr), m_line(0), m_condStr(nullptr) { } FileLine(const char* file, unsigned line, const char* condStr) : m_line(line) { size_t newSize = (strlen(file) + 1) * sizeof(char); m_file = HostAllocator::getHostAllocator().allocate<char>(newSize); strcpy_s(m_file, newSize, file); newSize = (strlen(condStr) + 1) * sizeof(char); m_condStr = HostAllocator::getHostAllocator().allocate<char>(newSize); strcpy_s(m_condStr, newSize, condStr); } FileLine(const FileLine& other) { m_file = other.m_file; m_line = other.m_line; m_condStr = other.m_condStr; } // GetHashCode() and Equals() are needed by JitHashTable static unsigned GetHashCode(FileLine fl) { assert(fl.m_file != nullptr); unsigned code = fl.m_line; for (const char* p = fl.m_file; *p != '\0'; p++) { code += *p; } // Could also add condStr. 
return code; } static bool Equals(FileLine fl1, FileLine fl2) { return (fl1.m_line == fl2.m_line) && (0 == strcmp(fl1.m_file, fl2.m_file)); } }; typedef JitHashTable<FileLine, FileLine, size_t, HostAllocator> FileLineToCountMap; FileLineToCountMap* NowayAssertMap; void Compiler::RecordNowayAssert(const char* filename, unsigned line, const char* condStr) { if (NowayAssertMap == nullptr) { NowayAssertMap = new (HostAllocator::getHostAllocator()) FileLineToCountMap(HostAllocator::getHostAllocator()); } FileLine fl(filename, line, condStr); size_t* pCount = NowayAssertMap->LookupPointer(fl); if (pCount == nullptr) { NowayAssertMap->Set(fl, 1); } else { ++(*pCount); } } void RecordNowayAssertGlobal(const char* filename, unsigned line, const char* condStr) { if ((JitConfig.JitMeasureNowayAssert() == 1) && (JitTls::GetCompiler() != nullptr)) { JitTls::GetCompiler()->RecordNowayAssert(filename, line, condStr); } } struct NowayAssertCountMap { size_t count; FileLine fl; NowayAssertCountMap() : count(0) { } struct compare { bool operator()(const NowayAssertCountMap& elem1, const NowayAssertCountMap& elem2) { return (ssize_t)elem2.count < (ssize_t)elem1.count; // sort in descending order } }; }; void DisplayNowayAssertMap() { if (NowayAssertMap != nullptr) { FILE* fout; LPCWSTR strJitMeasureNowayAssertFile = JitConfig.JitMeasureNowayAssertFile(); if (strJitMeasureNowayAssertFile != nullptr) { fout = _wfopen(strJitMeasureNowayAssertFile, W("a")); if (fout == nullptr) { fprintf(jitstdout, "Failed to open JitMeasureNowayAssertFile \"%ws\"\n", strJitMeasureNowayAssertFile); return; } } else { fout = jitstdout; } // Iterate noway assert map, create sorted table by occurrence, dump it. unsigned count = NowayAssertMap->GetCount(); NowayAssertCountMap* nacp = new NowayAssertCountMap[count]; unsigned i = 0; for (FileLineToCountMap::KeyIterator iter = NowayAssertMap->Begin(), end = NowayAssertMap->End(); !iter.Equal(end); ++iter) { nacp[i].count = iter.GetValue(); nacp[i].fl = iter.Get(); ++i; } jitstd::sort(nacp, nacp + count, NowayAssertCountMap::compare()); if (fout == jitstdout) { // Don't output the header if writing to a file, since we'll be appending to existing dumps in that case. fprintf(fout, "\nnoway_assert counts:\n"); fprintf(fout, "count, file, line, text\n"); } for (i = 0; i < count; i++) { fprintf(fout, "%u, %s, %u, \"%s\"\n", nacp[i].count, nacp[i].fl.m_file, nacp[i].fl.m_line, nacp[i].fl.m_condStr); } if (fout != jitstdout) { fclose(fout); fout = nullptr; } } } #endif // MEASURE_NOWAY /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE size_t genFlowNodeSize; size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE /*****************************************************************************/ // We keep track of methods we've already compiled. 
/***************************************************************************** * Declare the statics */ #ifdef DEBUG /* static */ LONG Compiler::s_compMethodsCount = 0; // to produce unique label names #endif #if MEASURE_MEM_ALLOC /* static */ bool Compiler::s_dspMemStats = false; #endif #ifndef PROFILING_SUPPORTED const bool Compiler::Options::compNoPInvokeInlineCB = false; #endif /***************************************************************************** * * One time initialization code */ /* static */ void Compiler::compStartup() { #if DISPLAY_SIZES grossVMsize = grossNCsize = totalNCsize = 0; #endif // DISPLAY_SIZES /* Initialize the table of tree node sizes */ GenTree::InitNodeSize(); #ifdef JIT32_GCENCODER // Initialize the GC encoder lookup table GCInfo::gcInitEncoderLookupTable(); #endif /* Initialize the emitter */ emitter::emitInit(); // Static vars of ValueNumStore ValueNumStore::InitValueNumStoreStatics(); compDisplayStaticSizes(jitstdout); } /***************************************************************************** * * One time finalization code */ /* static */ void Compiler::compShutdown() { if (s_pAltJitExcludeAssembliesList != nullptr) { s_pAltJitExcludeAssembliesList->~AssemblyNamesList2(); // call the destructor s_pAltJitExcludeAssembliesList = nullptr; } #ifdef DEBUG if (s_pJitDisasmIncludeAssembliesList != nullptr) { s_pJitDisasmIncludeAssembliesList->~AssemblyNamesList2(); // call the destructor s_pJitDisasmIncludeAssembliesList = nullptr; } #endif // DEBUG #if MEASURE_NOWAY DisplayNowayAssertMap(); #endif // MEASURE_NOWAY /* Shut down the emitter */ emitter::emitDone(); #if defined(DEBUG) || defined(INLINE_DATA) // Finish reading and/or writing inline xml if (JitConfig.JitInlineDumpXmlFile() != nullptr) { FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a")); if (file != nullptr) { InlineStrategy::FinalizeXml(file); fclose(file); } else { InlineStrategy::FinalizeXml(); } } #endif // defined(DEBUG) || defined(INLINE_DATA) #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS if (genMethodCnt == 0) { return; } #endif #if NODEBASH_STATS GenTree::ReportOperBashing(jitstdout); #endif // Where should we write our statistics output? FILE* fout = jitstdout; #ifdef FEATURE_JIT_METHOD_PERF if (compJitTimeLogFilename != nullptr) { FILE* jitTimeLogFile = _wfopen(compJitTimeLogFilename, W("a")); if (jitTimeLogFile != nullptr) { CompTimeSummaryInfo::s_compTimeSummary.Print(jitTimeLogFile); fclose(jitTimeLogFile); } } JitTimer::Shutdown(); #endif // FEATURE_JIT_METHOD_PERF #if COUNT_AST_OPERS // Add up all the counts so that we can show percentages of total unsigned totalCount = 0; for (unsigned op = 0; op < GT_COUNT; op++) { totalCount += GenTree::s_gtNodeCounts[op]; } if (totalCount > 0) { struct OperInfo { unsigned Count; unsigned Size; genTreeOps Oper; }; OperInfo opers[GT_COUNT]; for (unsigned op = 0; op < GT_COUNT; op++) { opers[op] = {GenTree::s_gtNodeCounts[op], GenTree::s_gtTrueSizes[op], static_cast<genTreeOps>(op)}; } jitstd::sort(opers, opers + ArrLen(opers), [](const OperInfo& l, const OperInfo& r) { // We'll be sorting in descending order. 
return l.Count >= r.Count; }); unsigned remainingCount = totalCount; unsigned remainingCountLarge = 0; unsigned remainingCountSmall = 0; unsigned countLarge = 0; unsigned countSmall = 0; fprintf(fout, "\nGenTree operator counts (approximate):\n\n"); for (OperInfo oper : opers) { unsigned size = oper.Size; unsigned count = oper.Count; double percentage = 100.0 * count / totalCount; if (size > TREE_NODE_SZ_SMALL) { countLarge += count; } else { countSmall += count; } // Let's not show anything below a threshold if (percentage >= 0.5) { fprintf(fout, " GT_%-17s %7u (%4.1lf%%) %3u bytes each\n", GenTree::OpName(oper.Oper), count, percentage, size); remainingCount -= count; } else { if (size > TREE_NODE_SZ_SMALL) { remainingCountLarge += count; } else { remainingCountSmall += count; } } } if (remainingCount > 0) { fprintf(fout, " All other GT_xxx ... %7u (%4.1lf%%) ... %4.1lf%% small + %4.1lf%% large\n", remainingCount, 100.0 * remainingCount / totalCount, 100.0 * remainingCountSmall / totalCount, 100.0 * remainingCountLarge / totalCount); } fprintf(fout, " -----------------------------------------------------\n"); fprintf(fout, " Total ....... %11u --ALL-- ... %4.1lf%% small + %4.1lf%% large\n", totalCount, 100.0 * countSmall / totalCount, 100.0 * countLarge / totalCount); fprintf(fout, "\n"); } #endif // COUNT_AST_OPERS #if DISPLAY_SIZES if (grossVMsize && grossNCsize) { fprintf(fout, "\n"); fprintf(fout, "--------------------------------------\n"); fprintf(fout, "Function and GC info size stats\n"); fprintf(fout, "--------------------------------------\n"); fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, grossNCsize, Target::g_tgtCPUName, 100 * grossNCsize / grossVMsize, "Total (excluding GC info)"); fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, totalNCsize, Target::g_tgtCPUName, 100 * totalNCsize / grossVMsize, "Total (including GC info)"); if (gcHeaderISize || gcHeaderNSize) { fprintf(fout, "\n"); fprintf(fout, "GC tables : [%7uI,%7uN] %7u byt (%u%% of IL, %u%% of %s).\n", gcHeaderISize + gcPtrMapISize, gcHeaderNSize + gcPtrMapNSize, totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName); fprintf(fout, "GC headers : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcHeaderISize, gcHeaderNSize, gcHeaderISize + gcHeaderNSize, (float)gcHeaderISize / (genMethodICnt + 0.001), (float)gcHeaderNSize / (genMethodNCnt + 0.001), (float)(gcHeaderISize + gcHeaderNSize) / genMethodCnt); fprintf(fout, "GC ptr maps : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcPtrMapISize, gcPtrMapNSize, gcPtrMapISize + gcPtrMapNSize, (float)gcPtrMapISize / (genMethodICnt + 0.001), (float)gcPtrMapNSize / (genMethodNCnt + 0.001), (float)(gcPtrMapISize + gcPtrMapNSize) / genMethodCnt); } else { fprintf(fout, "\n"); fprintf(fout, "GC tables take up %u bytes (%u%% of instr, %u%% of %6s code).\n", totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName); } #ifdef DEBUG #if DOUBLE_ALIGN fprintf(fout, "%u out of %u methods generated with double-aligned stack\n", Compiler::s_lvaDoubleAlignedProcsCount, genMethodCnt); #endif #endif } #endif // DISPLAY_SIZES #if CALL_ARG_STATS compDispCallArgStats(fout); #endif #if COUNT_BASIC_BLOCKS fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Basic block count frequency table:\n"); fprintf(fout, 
"--------------------------------------------------\n"); bbCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "IL method size frequency table for methods with a single basic block:\n"); fprintf(fout, "--------------------------------------------------\n"); bbOneBBSizeTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); #endif // COUNT_BASIC_BLOCKS #if COUNT_LOOPS fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Loop stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Total number of methods with loops is %5u\n", totalLoopMethods); fprintf(fout, "Total number of loops is %5u\n", totalLoopCount); fprintf(fout, "Maximum number of loops per method is %5u\n", maxLoopsPerMethod); fprintf(fout, "# of methods overflowing nat loop table is %5u\n", totalLoopOverflows); fprintf(fout, "Total number of 'unnatural' loops is %5u\n", totalUnnatLoopCount); fprintf(fout, "# of methods overflowing unnat loop limit is %5u\n", totalUnnatLoopOverflows); fprintf(fout, "Total number of loops with an iterator is %5u\n", iterLoopCount); fprintf(fout, "Total number of loops with a simple iterator is %5u\n", simpleTestLoopCount); fprintf(fout, "Total number of loops with a constant iterator is %5u\n", constIterLoopCount); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Loop count frequency table:\n"); fprintf(fout, "--------------------------------------------------\n"); loopCountTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Loop exit count frequency table:\n"); fprintf(fout, "--------------------------------------------------\n"); loopExitCountTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); #endif // COUNT_LOOPS #if DATAFLOW_ITER fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Total number of iterations in the CSE dataflow loop is %5u\n", CSEiterCount); fprintf(fout, "Total number of iterations in the CF dataflow loop is %5u\n", CFiterCount); #endif // DATAFLOW_ITER #if MEASURE_NODE_SIZE fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "GenTree node allocation stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Allocated %6I64u tree nodes (%7I64u bytes total, avg %4I64u bytes per method)\n", genNodeSizeStats.genTreeNodeCnt, genNodeSizeStats.genTreeNodeSize, genNodeSizeStats.genTreeNodeSize / genMethodCnt); fprintf(fout, "Allocated %7I64u bytes of unused tree node space (%3.2f%%)\n", genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize, (float)(100 * (genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize)) / genNodeSizeStats.genTreeNodeSize); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of per-method GenTree node counts:\n"); genTreeNcntHist.dump(fout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of per-method GenTree node allocations (in bytes):\n"); genTreeNsizHist.dump(fout); #endif // MEASURE_NODE_SIZE #if MEASURE_BLOCK_SIZE fprintf(fout, "\n"); fprintf(fout, 
"---------------------------------------------------\n"); fprintf(fout, "BasicBlock and flowList/BasicBlockList allocation stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Allocated %6u basic blocks (%7u bytes total, avg %4u bytes per method)\n", BasicBlock::s_Count, BasicBlock::s_Size, BasicBlock::s_Size / genMethodCnt); fprintf(fout, "Allocated %6u flow nodes (%7u bytes total, avg %4u bytes per method)\n", genFlowNodeCnt, genFlowNodeSize, genFlowNodeSize / genMethodCnt); #endif // MEASURE_BLOCK_SIZE #if MEASURE_MEM_ALLOC if (s_dspMemStats) { fprintf(fout, "\nAll allocations:\n"); ArenaAllocator::dumpAggregateMemStats(jitstdout); fprintf(fout, "\nLargest method:\n"); ArenaAllocator::dumpMaxMemStats(jitstdout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of total memory allocated per method (in KB):\n"); memAllocHist.dump(fout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of total memory used per method (in KB):\n"); memUsedHist.dump(fout); } #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS #ifdef DEBUG // Always display loop stats in retail if (JitConfig.DisplayLoopHoistStats() != 0) #endif // DEBUG { PrintAggregateLoopHoistStats(jitstdout); } #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS if (JitConfig.JitEnregStats() != 0) { s_enregisterStats.Dump(fout); } #endif // TRACK_ENREG_STATS #if MEASURE_PTRTAB_SIZE fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "GC pointer table stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Reg pointer descriptor size (internal): %8u (avg %4u per method)\n", GCInfo::s_gcRegPtrDscSize, GCInfo::s_gcRegPtrDscSize / genMethodCnt); fprintf(fout, "Total pointer table size: %8u (avg %4u per method)\n", GCInfo::s_gcTotalPtrTabSize, GCInfo::s_gcTotalPtrTabSize / genMethodCnt); #endif // MEASURE_PTRTAB_SIZE #if MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES if (genMethodCnt != 0) { fprintf(fout, "\n"); fprintf(fout, "A total of %6u methods compiled", genMethodCnt); #if DISPLAY_SIZES if (genMethodICnt || genMethodNCnt) { fprintf(fout, " (%u interruptible, %u non-interruptible)", genMethodICnt, genMethodNCnt); } #endif // DISPLAY_SIZES fprintf(fout, ".\n"); } #endif // MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES #if EMITTER_STATS emitterStats(fout); #endif #if MEASURE_FATAL fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Fatal errors stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, " badCode: %u\n", fatal_badCode); fprintf(fout, " noWay: %u\n", fatal_noWay); fprintf(fout, " implLimitation: %u\n", fatal_implLimitation); fprintf(fout, " NOMEM: %u\n", fatal_NOMEM); fprintf(fout, " noWayAssertBody: %u\n", fatal_noWayAssertBody); #ifdef DEBUG fprintf(fout, " noWayAssertBodyArgs: %u\n", fatal_noWayAssertBodyArgs); #endif // DEBUG fprintf(fout, " NYI: %u\n", fatal_NYI); #endif // MEASURE_FATAL } /***************************************************************************** * Display static data structure sizes. 
*/ /* static */ void Compiler::compDisplayStaticSizes(FILE* fout) { #if MEASURE_NODE_SIZE GenTree::DumpNodeSizes(fout); #endif #if EMITTER_STATS emitterStaticStats(fout); #endif } /***************************************************************************** * * Constructor */ void Compiler::compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo) { assert(pAlloc); compArenaAllocator = pAlloc; // Inlinee Compile object will only be allocated when needed for the 1st time. InlineeCompiler = nullptr; // Set the inline info. impInlineInfo = inlineInfo; info.compCompHnd = compHnd; info.compMethodHnd = methodHnd; info.compMethodInfo = methodInfo; #ifdef DEBUG bRangeAllowStress = false; #endif #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS // Initialize the method name and related info, as it is used early in determining whether to // apply stress modes, and which ones to apply. // Note that even allocating memory can invoke the stress mechanism, so ensure that both // 'compMethodName' and 'compFullName' are either null or valid before we allocate. // (The stress mode checks references these prior to checking bRangeAllowStress.) // info.compMethodName = nullptr; info.compClassName = nullptr; info.compFullName = nullptr; const char* classNamePtr; const char* methodName; methodName = eeGetMethodName(methodHnd, &classNamePtr); unsigned len = (unsigned)roundUp(strlen(classNamePtr) + 1); info.compClassName = getAllocator(CMK_DebugOnly).allocate<char>(len); info.compMethodName = methodName; strcpy_s((char*)info.compClassName, len, classNamePtr); info.compFullName = eeGetMethodFullName(methodHnd); info.compPerfScore = 0.0; info.compMethodSuperPMIIndex = g_jitHost->getIntConfigValue(W("SuperPMIMethodContextNumber"), -1); #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) info.compMethodHashPrivate = 0; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef DEBUG // Opt-in to jit stress based on method hash ranges. // // Note the default (with JitStressRange not set) is that all // methods will be subject to stress. static ConfigMethodRange fJitStressRange; fJitStressRange.EnsureInit(JitConfig.JitStressRange()); assert(!fJitStressRange.Error()); bRangeAllowStress = fJitStressRange.Contains(info.compMethodHash()); #endif // DEBUG eeInfoInitialized = false; compDoAggressiveInlining = false; if (compIsForInlining()) { m_inlineStrategy = nullptr; compInlineResult = inlineInfo->inlineResult; } else { m_inlineStrategy = new (this, CMK_Inlining) InlineStrategy(this); compInlineResult = nullptr; } // Initialize this to the first phase to run. mostRecentlyActivePhase = PHASE_PRE_IMPORT; // Initially, no phase checks are active. activePhaseChecks = PhaseChecks::CHECK_NONE; #ifdef FEATURE_TRACELOGGING // Make sure JIT telemetry is initialized as soon as allocations can be made // but no later than a point where noway_asserts can be thrown. // 1. JIT telemetry could allocate some objects internally. // 2. NowayAsserts are tracked through telemetry. // Note: JIT telemetry could gather data when compiler is not fully initialized. // So you have to initialize the compiler variables you use for telemetry. 
assert((unsigned)PHASE_PRE_IMPORT == 0); info.compILCodeSize = 0; info.compMethodHnd = nullptr; compJitTelemetry.Initialize(this); #endif fgInit(); lvaInit(); if (!compIsForInlining()) { codeGen = getCodeGenerator(this); optInit(); hashBv::Init(this); compVarScopeMap = nullptr; // If this method were a real constructor for Compiler, these would // become method initializations. impPendingBlockMembers = JitExpandArray<BYTE>(getAllocator()); impSpillCliquePredMembers = JitExpandArray<BYTE>(getAllocator()); impSpillCliqueSuccMembers = JitExpandArray<BYTE>(getAllocator()); new (&genIPmappings, jitstd::placement_t()) jitstd::list<IPmappingDsc>(getAllocator(CMK_DebugInfo)); #ifdef DEBUG new (&genPreciseIPmappings, jitstd::placement_t()) jitstd::list<PreciseIPMapping>(getAllocator(CMK_DebugOnly)); #endif lvMemoryPerSsaData = SsaDefArray<SsaMemDef>(); // // Initialize all the per-method statistics gathering data structures. // optLoopsCloned = 0; #if LOOP_HOIST_STATS m_loopsConsidered = 0; m_curLoopHasHoistedExpression = false; m_loopsWithHoistedExpressions = 0; m_totalHoistedExpressions = 0; #endif // LOOP_HOIST_STATS #if MEASURE_NODE_SIZE genNodeSizeStatsPerFunc.Init(); #endif // MEASURE_NODE_SIZE } else { codeGen = nullptr; } compJmpOpUsed = false; compLongUsed = false; compTailCallUsed = false; compTailPrefixSeen = false; compLocallocSeen = false; compLocallocUsed = false; compLocallocOptimized = false; compQmarkRationalized = false; compQmarkUsed = false; compFloatingPointUsed = false; compUnsafeCastUsed = false; compSuppressedZeroInit = false; compNeedsGSSecurityCookie = false; compGSReorderStackLayout = false; compGeneratingProlog = false; compGeneratingEpilog = false; compLSRADone = false; compRationalIRForm = false; #ifdef DEBUG compCodeGenDone = false; opts.compMinOptsIsUsed = false; #endif opts.compMinOptsIsSet = false; // Used by fgFindJumpTargets for inlining heuristics. opts.instrCount = 0; // Used to track when we should consider running EarlyProp optMethodFlags = 0; optNoReturnCallCount = 0; #ifdef DEBUG m_nodeTestData = nullptr; m_loopHoistCSEClass = FIRST_LOOP_HOIST_CSE_CLASS; #endif m_switchDescMap = nullptr; m_blockToEHPreds = nullptr; m_fieldSeqStore = nullptr; m_zeroOffsetFieldMap = nullptr; m_arrayInfoMap = nullptr; m_refAnyClass = nullptr; for (MemoryKind memoryKind : allMemoryKinds()) { m_memorySsaMap[memoryKind] = nullptr; } #ifdef DEBUG if (!compIsForInlining()) { compDoComponentUnitTestsOnce(); } #endif // DEBUG vnStore = nullptr; m_opAsgnVarDefSsaNums = nullptr; m_nodeToLoopMemoryBlockMap = nullptr; fgSsaPassesCompleted = 0; fgVNPassesCompleted = 0; // check that HelperCallProperties are initialized assert(s_helperCallProperties.IsPure(CORINFO_HELP_GETSHARED_GCSTATIC_BASE)); assert(!s_helperCallProperties.IsPure(CORINFO_HELP_GETFIELDOBJ)); // quick sanity check // We start with the flow graph in tree-order fgOrder = FGOrderTree; m_classLayoutTable = nullptr; #ifdef FEATURE_SIMD m_simdHandleCache = nullptr; #endif // FEATURE_SIMD compUsesThrowHelper = false; } /***************************************************************************** * * Destructor */ void Compiler::compDone() { } void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection) /* OUT */ { void* addr; if (info.compMatchedVM) { addr = info.compCompHnd->getHelperFtn(ftnNum, ppIndirection); } else { // If we don't have a matched VM, we won't get valid results when asking for a helper function. 
        addr = UlongToPtr(0xCA11CA11); // "callcall"
    }

    return addr;
}

unsigned Compiler::compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd)
{
    var_types sigType = genActualType(JITtype2varType(cit));
    unsigned  sigSize;
    sigSize = genTypeSize(sigType);
    if (cit == CORINFO_TYPE_VALUECLASS)
    {
        sigSize = info.compCompHnd->getClassSize(clsHnd);
    }
    else if (cit == CORINFO_TYPE_REFANY)
    {
        sigSize = 2 * TARGET_POINTER_SIZE;
    }
    return sigSize;
}

#ifdef DEBUG
static bool DidComponentUnitTests = false;

void Compiler::compDoComponentUnitTestsOnce()
{
    if (!JitConfig.RunComponentUnitTests())
    {
        return;
    }

    if (!DidComponentUnitTests)
    {
        DidComponentUnitTests = true;
        ValueNumStore::RunTests(this);
        BitSetSupport::TestSuite(getAllocatorDebugOnly());
    }
}

//------------------------------------------------------------------------
// compGetJitDefaultFill:
//
// Return Value:
//    An unsigned char value used to initialize memory allocated by the JIT.
//    The default value is taken from COMPLUS_JitDefaultFill; if it is not set
//    the value will be 0xdd. When JitStress is active a random value based
//    on the method hash is used.
//
// Notes:
//    Note that we can't use small values like zero, because we have some
//    asserts that can fire for such values.
//
// static
unsigned char Compiler::compGetJitDefaultFill(Compiler* comp)
{
    unsigned char defaultFill = (unsigned char)JitConfig.JitDefaultFill();

    if (comp != nullptr && comp->compStressCompile(STRESS_GENERIC_VARN, 50))
    {
        unsigned temp;
        temp = comp->info.compMethodHash();
        temp = (temp >> 16) ^ temp;
        temp = (temp >> 8) ^ temp;
        temp = temp & 0xff;
        // asserts like this: assert(!IsUninitialized(stkLvl));
        // mean that small values for defaultFill are problematic
        // so we make the value larger in that case.
        if (temp < 0x20)
        {
            temp |= 0x80;
        }

        // Make a misaligned pointer value to reduce probability of getting a valid value and firing
        // assert(!IsUninitialized(pointer)).
        temp |= 0x1;
        defaultFill = (unsigned char)temp;
    }

    return defaultFill;
}

#endif // DEBUG

/*****************************************************************************/
#ifdef DEBUG
/*****************************************************************************/

VarName Compiler::compVarName(regNumber reg, bool isFloatReg)
{
    if (isFloatReg)
    {
        assert(genIsValidFloatReg(reg));
    }
    else
    {
        assert(genIsValidReg(reg));
    }

    if ((info.compVarScopesCount > 0) && compCurBB && opts.varNames)
    {
        unsigned   lclNum;
        LclVarDsc* varDsc;

        /* Look for the matching register */
        for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
        {
            /* If the variable is not in a register, or not in the register we're looking for, quit. */
            /* Also, if it is a compiler generated variable (i.e. slot# > info.compVarScopesCount), don't bother.
*/ if ((varDsc->lvRegister != 0) && (varDsc->GetRegNum() == reg) && (varDsc->lvSlotNum < info.compVarScopesCount)) { /* check if variable in that register is live */ if (VarSetOps::IsMember(this, compCurLife, varDsc->lvVarIndex)) { /* variable is live - find the corresponding slot */ VarScopeDsc* varScope = compFindLocalVar(varDsc->lvSlotNum, compCurBB->bbCodeOffs, compCurBB->bbCodeOffsEnd); if (varScope) { return varScope->vsdName; } } } } } return nullptr; } const char* Compiler::compRegVarName(regNumber reg, bool displayVar, bool isFloatReg) { #ifdef TARGET_ARM isFloatReg = genIsValidFloatReg(reg); #endif if (displayVar && (reg != REG_NA)) { VarName varName = compVarName(reg, isFloatReg); if (varName) { const int NAME_VAR_REG_BUFFER_LEN = 4 + 256 + 1; static char nameVarReg[2][NAME_VAR_REG_BUFFER_LEN]; // to avoid overwriting the buffer when have 2 // consecutive calls before printing static int index = 0; // for circular index into the name array index = (index + 1) % 2; // circular reuse of index sprintf_s(nameVarReg[index], NAME_VAR_REG_BUFFER_LEN, "%s'%s'", getRegName(reg), VarNameToStr(varName)); return nameVarReg[index]; } } /* no debug info required or no variable in that register -> return standard name */ return getRegName(reg); } const char* Compiler::compRegNameForSize(regNumber reg, size_t size) { if (size == 0 || size >= 4) { return compRegVarName(reg, true); } // clang-format off static const char * sizeNames[][2] = { { "al", "ax" }, { "cl", "cx" }, { "dl", "dx" }, { "bl", "bx" }, #ifdef TARGET_AMD64 { "spl", "sp" }, // ESP { "bpl", "bp" }, // EBP { "sil", "si" }, // ESI { "dil", "di" }, // EDI { "r8b", "r8w" }, { "r9b", "r9w" }, { "r10b", "r10w" }, { "r11b", "r11w" }, { "r12b", "r12w" }, { "r13b", "r13w" }, { "r14b", "r14w" }, { "r15b", "r15w" }, #endif // TARGET_AMD64 }; // clang-format on assert(isByteReg(reg)); assert(genRegMask(reg) & RBM_BYTE_REGS); assert(size == 1 || size == 2); return sizeNames[reg][size - 1]; } const char* Compiler::compLocalVarName(unsigned varNum, unsigned offs) { unsigned i; VarScopeDsc* t; for (i = 0, t = info.compVarScopes; i < info.compVarScopesCount; i++, t++) { if (t->vsdVarNum != varNum) { continue; } if (offs >= t->vsdLifeBeg && offs < t->vsdLifeEnd) { return VarNameToStr(t->vsdName); } } return nullptr; } /*****************************************************************************/ #endif // DEBUG /*****************************************************************************/ void Compiler::compSetProcessor() { // // NOTE: This function needs to be kept in sync with EEJitManager::SetCpuInfo() in vm\codeman.cpp // const JitFlags& jitFlags = *opts.jitFlags; #if defined(TARGET_ARM) info.genCPU = CPU_ARM; #elif defined(TARGET_ARM64) info.genCPU = CPU_ARM64; #elif defined(TARGET_AMD64) info.genCPU = CPU_X64; #elif defined(TARGET_X86) if (jitFlags.IsSet(JitFlags::JIT_FLAG_TARGET_P4)) info.genCPU = CPU_X86_PENTIUM_4; else info.genCPU = CPU_X86; #endif // // Processor specific optimizations // CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 opts.compUseCMOV = true; #elif defined(TARGET_X86) opts.compUseCMOV = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_CMOV); #ifdef DEBUG if (opts.compUseCMOV) opts.compUseCMOV = !compStressCompile(STRESS_USE_CMOV, 50); #endif // DEBUG #endif // TARGET_X86 // The VM will set the ISA flags depending on actual hardware support // and any specified config switches specified by the user. The exception // here is for certain "artificial ISAs" such as Vector64/128/256 where they // don't actually exist. 
The JIT is in charge of adding those and ensuring // the total sum of flags is still valid. CORINFO_InstructionSetFlags instructionSetFlags = jitFlags.GetInstructionSetFlags(); opts.compSupportsISA = 0; opts.compSupportsISAReported = 0; opts.compSupportsISAExactly = 0; #if defined(TARGET_XARCH) instructionSetFlags.AddInstructionSet(InstructionSet_Vector128); instructionSetFlags.AddInstructionSet(InstructionSet_Vector256); #endif // TARGET_XARCH #if defined(TARGET_ARM64) instructionSetFlags.AddInstructionSet(InstructionSet_Vector64); instructionSetFlags.AddInstructionSet(InstructionSet_Vector128); #endif // TARGET_ARM64 instructionSetFlags = EnsureInstructionSetFlagsAreValid(instructionSetFlags); opts.setSupportedISAs(instructionSetFlags); #ifdef TARGET_XARCH if (!compIsForInlining()) { if (canUseVexEncoding()) { codeGen->GetEmitter()->SetUseVEXEncoding(true); // Assume each JITted method does not contain AVX instruction at first codeGen->GetEmitter()->SetContainsAVX(false); codeGen->GetEmitter()->SetContains256bitAVX(false); } } #endif // TARGET_XARCH } bool Compiler::notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const { const char* isaString = InstructionSetToString(isa); JITDUMP("Notify VM instruction set (%s) %s be supported.\n", isaString, supported ? "must" : "must not"); return info.compCompHnd->notifyInstructionSetUsage(isa, supported); } #ifdef PROFILING_SUPPORTED // A Dummy routine to receive Enter/Leave/Tailcall profiler callbacks. // These are used when complus_JitEltHookEnabled=1 #ifdef TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle, UINT_PTR callerSP) { return; } #else //! TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle) { return; } #endif //! TARGET_AMD64 #endif // PROFILING_SUPPORTED bool Compiler::compShouldThrowOnNoway( #ifdef FEATURE_TRACELOGGING const char* filename, unsigned line #endif ) { #ifdef FEATURE_TRACELOGGING compJitTelemetry.NotifyNowayAssert(filename, line); #endif // In min opts, we don't want the noway assert to go through the exception // path. Instead we want it to just silently go through codegen for // compat reasons. return !opts.MinOpts(); } // ConfigInteger does not offer an option for decimal flags. Any numbers are interpreted as hex. // I could add the decimal option to ConfigInteger or I could write a function to reinterpret this // value as the user intended. unsigned ReinterpretHexAsDecimal(unsigned in) { // ex: in: 0x100 returns: 100 unsigned result = 0; unsigned index = 1; // default value if (in == INT_MAX) { return in; } while (in) { unsigned digit = in % 16; in >>= 4; assert(digit < 10); result += digit * index; index *= 10; } return result; } void Compiler::compInitOptions(JitFlags* jitFlags) { #ifdef UNIX_AMD64_ABI opts.compNeedToAlignFrame = false; #endif // UNIX_AMD64_ABI memset(&opts, 0, sizeof(opts)); if (compIsForInlining()) { // The following flags are lost when inlining. (They are removed in // Compiler::fgInvokeInlineeCompiler().) 
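// Worked example for ReinterpretHexAsDecimal above (illustrative sketch): a
// config value typed as "100" is parsed by the config system as hex, i.e. as
// 0x100 == 256. Peeling off the hex digits and reassembling them in base ten
// recovers the number the user intended:
//
//     0x100 -> digits 0, 0, 1 (low to high) -> 0*1 + 0*10 + 1*100 = 100
//
// Note the assert(digit < 10) in the loop: the helper only supports values
// whose hex digits are also valid decimal digits.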
assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_TRACK_TRANSITIONS)); } opts.jitFlags = jitFlags; opts.compFlags = CLFLG_MAXOPT; // Default value is for full optimization if (jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE) || jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) || jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { opts.compFlags = CLFLG_MINOPT; } // Don't optimize .cctors (except prejit) or if we're an inlinee else if (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((info.compFlags & FLG_CCTOR) == FLG_CCTOR) && !compIsForInlining()) { opts.compFlags = CLFLG_MINOPT; } // Default value is to generate a blend of size and speed optimizations // opts.compCodeOpt = BLENDED_CODE; // If the EE sets SIZE_OPT or if we are compiling a Class constructor // we will optimize for code size at the expense of speed // if (jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT) || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) { opts.compCodeOpt = SMALL_CODE; } // // If the EE sets SPEED_OPT we will optimize for speed at the expense of code size // else if (jitFlags->IsSet(JitFlags::JIT_FLAG_SPEED_OPT) || (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1) && !jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT))) { opts.compCodeOpt = FAST_CODE; assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT)); } //------------------------------------------------------------------------- opts.compDbgCode = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE); opts.compDbgInfo = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_INFO); opts.compDbgEnC = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC); #ifdef DEBUG opts.compJitAlignLoopAdaptive = JitConfig.JitAlignLoopAdaptive() == 1; opts.compJitAlignLoopBoundary = (unsigned short)JitConfig.JitAlignLoopBoundary(); opts.compJitAlignLoopMinBlockWeight = (unsigned short)JitConfig.JitAlignLoopMinBlockWeight(); opts.compJitAlignLoopForJcc = JitConfig.JitAlignLoopForJcc() == 1; opts.compJitAlignLoopMaxCodeSize = (unsigned short)JitConfig.JitAlignLoopMaxCodeSize(); opts.compJitHideAlignBehindJmp = JitConfig.JitHideAlignBehindJmp() == 1; #else opts.compJitAlignLoopAdaptive = true; opts.compJitAlignLoopBoundary = DEFAULT_ALIGN_LOOP_BOUNDARY; opts.compJitAlignLoopMinBlockWeight = DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT; opts.compJitAlignLoopMaxCodeSize = DEFAULT_MAX_LOOPSIZE_FOR_ALIGN; opts.compJitHideAlignBehindJmp = true; #endif #ifdef TARGET_XARCH if (opts.compJitAlignLoopAdaptive) { // For adaptive alignment, padding limit is equal to the max instruction encoding // size which is 15 bytes. Hence (32 >> 1) - 1 = 15 bytes. opts.compJitAlignPaddingLimit = (opts.compJitAlignLoopBoundary >> 1) - 1; } else { // For non-adaptive alignment, padding limit is 1 less than the alignment boundary // specified. opts.compJitAlignPaddingLimit = opts.compJitAlignLoopBoundary - 1; } #elif TARGET_ARM64 if (opts.compJitAlignLoopAdaptive) { // For adaptive alignment, padding limit is same as specified by the alignment // boundary because all instructions are 4 bytes long. Hence (32 >> 1) = 16 bytes. opts.compJitAlignPaddingLimit = (opts.compJitAlignLoopBoundary >> 1); } else { // For non-adaptive, padding limit is same as specified by the alignment. 
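// Illustrative arithmetic for the padding limits in this block (sketch,
// assuming the default 32-byte alignment boundary):
//
//    xarch, adaptive     : (32 >> 1) - 1 = 15 bytes (max xarch instruction size)
//    xarch, non-adaptive :  32 - 1       = 31 bytes
//    arm64, adaptive     : (32 >> 1)     = 16 bytes (fixed 4-byte instructions)
//    arm64, non-adaptive :  32           = 32 bytes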
opts.compJitAlignPaddingLimit = opts.compJitAlignLoopBoundary; } #endif assert(isPow2(opts.compJitAlignLoopBoundary)); #ifdef TARGET_ARM64 // The minimum encoding size for Arm64 is 4 bytes. assert(opts.compJitAlignLoopBoundary >= 4); #endif #if REGEN_SHORTCUTS || REGEN_CALLPAT // We never want to have debugging enabled when regenerating GC encoding patterns opts.compDbgCode = false; opts.compDbgInfo = false; opts.compDbgEnC = false; #endif compSetProcessor(); #ifdef DEBUG opts.dspOrder = false; // Optionally suppress inliner compiler instance dumping. // if (compIsForInlining()) { if (JitConfig.JitDumpInlinePhases() > 0) { verbose = impInlineInfo->InlinerCompiler->verbose; } else { verbose = false; } } else { verbose = false; codeGen->setVerbose(false); } verboseTrees = verbose && shouldUseVerboseTrees(); verboseSsa = verbose && shouldUseVerboseSsa(); asciiTrees = shouldDumpASCIITrees(); opts.dspDiffable = compIsForInlining() ? impInlineInfo->InlinerCompiler->opts.dspDiffable : false; #endif opts.altJit = false; #if defined(LATE_DISASM) && !defined(DEBUG) // For non-debug builds with the late disassembler built in, we currently always do late disassembly // (we have no way to determine when not to, since we don't have class/method names). // In the DEBUG case, this is initialized to false, below. opts.doLateDisasm = true; #endif #ifdef DEBUG const JitConfigValues::MethodSet* pfAltJit; if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { pfAltJit = &JitConfig.AltJitNgen(); } else { pfAltJit = &JitConfig.AltJit(); } if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { if (pfAltJit->contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.altJit = true; } unsigned altJitLimit = ReinterpretHexAsDecimal(JitConfig.AltJitLimit()); if (altJitLimit > 0 && Compiler::jitTotalMethodCompiled >= altJitLimit) { opts.altJit = false; } } #else // !DEBUG const char* altJitVal; if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { altJitVal = JitConfig.AltJitNgen().list(); } else { altJitVal = JitConfig.AltJit().list(); } if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { // In release mode, you either get all methods or no methods. You must use "*" as the parameter, or we ignore // it. You don't get to give a regular expression of methods to match. // (Partially, this is because we haven't computed and stored the method and class name except in debug, and it // might be expensive to do so.) if ((altJitVal != nullptr) && (strcmp(altJitVal, "*") == 0)) { opts.altJit = true; } } #endif // !DEBUG // Take care of COMPlus_AltJitExcludeAssemblies. if (opts.altJit) { // First, initialize the AltJitExcludeAssemblies list, but only do it once. if (!s_pAltJitExcludeAssembliesListInitialized) { const WCHAR* wszAltJitExcludeAssemblyList = JitConfig.AltJitExcludeAssemblies(); if (wszAltJitExcludeAssemblyList != nullptr) { // NOTE: The Assembly name list is allocated in the process heap, not in the no-release heap, which is // reclaimed // for every compilation. This is ok because we only allocate once, due to the static. s_pAltJitExcludeAssembliesList = new (HostAllocator::getHostAllocator()) AssemblyNamesList2(wszAltJitExcludeAssemblyList, HostAllocator::getHostAllocator()); } s_pAltJitExcludeAssembliesListInitialized = true; } if (s_pAltJitExcludeAssembliesList != nullptr) { // We have an exclusion list. See if this method is in an assembly that is on the list. 
// Note that we check this for every method, since we might inline across modules, and // if the inlinee module is on the list, we don't want to use the altjit for it. const char* methodAssemblyName = info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); if (s_pAltJitExcludeAssembliesList->IsInList(methodAssemblyName)) { opts.altJit = false; } } } #ifdef DEBUG bool altJitConfig = !pfAltJit->isEmpty(); // If we have a non-empty AltJit config then we change all of these other // config values to refer only to the AltJit. Otherwise, a lot of COMPlus_* variables // would apply to both the altjit and the normal JIT, but we only care about // debugging the altjit if the COMPlus_AltJit configuration is set. // if (compIsForImportOnly() && (!altJitConfig || opts.altJit)) { if (JitConfig.JitImportBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { assert(!"JitImportBreak reached"); } } bool verboseDump = false; if (!altJitConfig || opts.altJit) { // We should only enable 'verboseDump' when we are actually compiling a matching method // and not enable it when we are just considering inlining a matching method. // if (!compIsForInlining()) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (JitConfig.NgenDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { verboseDump = true; } unsigned ngenHashDumpVal = (unsigned)JitConfig.NgenHashDump(); if ((ngenHashDumpVal != (DWORD)-1) && (ngenHashDumpVal == info.compMethodHash())) { verboseDump = true; } } else { if (JitConfig.JitDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { verboseDump = true; } unsigned jitHashDumpVal = (unsigned)JitConfig.JitHashDump(); if ((jitHashDumpVal != (DWORD)-1) && (jitHashDumpVal == info.compMethodHash())) { verboseDump = true; } } } } // Optionally suppress dumping Tier0 jit requests. // if (verboseDump && jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { verboseDump = (JitConfig.JitDumpTier0() > 0); } // Optionally suppress dumping except for a specific OSR jit request. // const int dumpAtOSROffset = JitConfig.JitDumpAtOSROffset(); if (verboseDump && (dumpAtOSROffset != -1)) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { verboseDump = (((IL_OFFSET)dumpAtOSROffset) == info.compILEntry); } else { verboseDump = false; } } if (verboseDump) { verbose = true; } #endif // DEBUG #ifdef FEATURE_SIMD // Minimum bar for availing SIMD benefits is SSE2 on AMD64/x86. featureSIMD = jitFlags->IsSet(JitFlags::JIT_FLAG_FEATURE_SIMD); setUsesSIMDTypes(false); #endif // FEATURE_SIMD lvaEnregEHVars = (compEnregLocals() && JitConfig.EnableEHWriteThru()); lvaEnregMultiRegVars = (compEnregLocals() && JitConfig.EnableMultiRegLocals()); if (compIsForImportOnly()) { return; } #if FEATURE_TAILCALL_OPT // By default opportunistic tail call optimization is enabled. // Recognition is done in the importer so this must be set for // inlinees as well. opts.compTailCallOpt = true; #endif // FEATURE_TAILCALL_OPT #if FEATURE_FASTTAILCALL // By default fast tail calls are enabled. 
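// Worked example for the dump gating above (illustrative sketch): with
// JitDumpAtOSROffset set to 0x23, only an OSR compilation whose IL entry
// offset (info.compILEntry) is 0x23 keeps verboseDump; the Tier0 version and
// OSR variants at other offsets have their dumps suppressed, which makes it
// practical to isolate a single OSR transition in a large JitDump.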
opts.compFastTailCalls = true; #endif // FEATURE_FASTTAILCALL // Profile data // fgPgoSchema = nullptr; fgPgoData = nullptr; fgPgoSchemaCount = 0; fgPgoQueryResult = E_FAIL; fgPgoFailReason = nullptr; fgPgoSource = ICorJitInfo::PgoSource::Unknown; if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT)) { fgPgoQueryResult = info.compCompHnd->getPgoInstrumentationResults(info.compMethodHnd, &fgPgoSchema, &fgPgoSchemaCount, &fgPgoData, &fgPgoSource); // a failed result that also has a non-NULL fgPgoSchema // indicates that the ILSize for the method no longer matches // the ILSize for the method when profile data was collected. // // We will discard the IBC data in this case // if (FAILED(fgPgoQueryResult)) { fgPgoFailReason = (fgPgoSchema != nullptr) ? "No matching PGO data" : "No PGO data"; fgPgoData = nullptr; fgPgoSchema = nullptr; } // Optionally, disable use of profile data. // else if (JitConfig.JitDisablePgo() > 0) { fgPgoFailReason = "PGO data available, but JitDisablePgo > 0"; fgPgoQueryResult = E_FAIL; fgPgoData = nullptr; fgPgoSchema = nullptr; fgPgoDisabled = true; } #ifdef DEBUG // Optionally, enable use of profile data for only some methods. // else { static ConfigMethodRange JitEnablePgoRange; JitEnablePgoRange.EnsureInit(JitConfig.JitEnablePgoRange()); // Base this decision on the root method hash, so a method either sees all available // profile data (including that for inlinees), or none of it. // const unsigned hash = impInlineRoot()->info.compMethodHash(); if (!JitEnablePgoRange.Contains(hash)) { fgPgoFailReason = "PGO data available, but method hash NOT within JitEnablePgoRange"; fgPgoQueryResult = E_FAIL; fgPgoData = nullptr; fgPgoSchema = nullptr; fgPgoDisabled = true; } } // A successful result implies a non-NULL fgPgoSchema // if (SUCCEEDED(fgPgoQueryResult)) { assert(fgPgoSchema != nullptr); } // A failed result implies a NULL fgPgoSchema // see implementation of Compiler::fgHaveProfileData() // if (FAILED(fgPgoQueryResult)) { assert(fgPgoSchema == nullptr); } #endif } if (compIsForInlining()) { return; } // The rest of the opts fields that we initialize here // should only be used when we generate code for the method // They should not be used when importing or inlining CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_TAILCALL_OPT opts.compTailCallLoopOpt = true; #endif // FEATURE_TAILCALL_OPT opts.genFPorder = true; opts.genFPopt = true; opts.instrCount = 0; opts.lvRefCount = 0; #ifdef PROFILING_SUPPORTED opts.compJitELTHookEnabled = false; #endif // PROFILING_SUPPORTED #if defined(TARGET_ARM64) // 0 is default: use the appropriate frame type based on the function. opts.compJitSaveFpLrWithCalleeSavedRegisters = 0; #endif // defined(TARGET_ARM64) #ifdef DEBUG opts.dspInstrs = false; opts.dspLines = false; opts.varNames = false; opts.dmpHex = false; opts.disAsm = false; opts.disAsmSpilled = false; opts.disDiffable = false; opts.disAddr = false; opts.disAlignment = false; opts.dspCode = false; opts.dspEHTable = false; opts.dspDebugInfo = false; opts.dspGCtbls = false; opts.disAsm2 = false; opts.dspUnwind = false; opts.compLongAddress = false; opts.optRepeat = false; #ifdef LATE_DISASM opts.doLateDisasm = false; #endif // LATE_DISASM compDebugBreak = false; // If we have a non-empty AltJit config then we change all of these other // config values to refer only to the AltJit. 
// if (!altJitConfig || opts.altJit) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if ((JitConfig.NgenOrder() & 1) == 1) { opts.dspOrder = true; } if (JitConfig.NgenGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspGCtbls = true; } if (JitConfig.NgenDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.disAsm = true; } if (JitConfig.NgenDisasm().contains("SPILLED", nullptr, nullptr)) { opts.disAsmSpilled = true; } if (JitConfig.NgenUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspUnwind = true; } if (JitConfig.NgenEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspEHTable = true; } if (JitConfig.NgenDebugDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspDebugInfo = true; } } else { bool disEnabled = true; // Setup assembly name list for disassembly, if not already set up. if (!s_pJitDisasmIncludeAssembliesListInitialized) { const WCHAR* assemblyNameList = JitConfig.JitDisasmAssemblies(); if (assemblyNameList != nullptr) { s_pJitDisasmIncludeAssembliesList = new (HostAllocator::getHostAllocator()) AssemblyNamesList2(assemblyNameList, HostAllocator::getHostAllocator()); } s_pJitDisasmIncludeAssembliesListInitialized = true; } // If we have an assembly name list for disassembly, also check this method's assembly. if (s_pJitDisasmIncludeAssembliesList != nullptr && !s_pJitDisasmIncludeAssembliesList->IsEmpty()) { const char* assemblyName = info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); if (!s_pJitDisasmIncludeAssembliesList->IsInList(assemblyName)) { disEnabled = false; } } if (disEnabled) { if ((JitConfig.JitOrder() & 1) == 1) { opts.dspOrder = true; } if (JitConfig.JitGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspGCtbls = true; } if (JitConfig.JitDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.disAsm = true; } if (JitConfig.JitDisasm().contains("SPILLED", nullptr, nullptr)) { opts.disAsmSpilled = true; } if (JitConfig.JitUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspUnwind = true; } if (JitConfig.JitEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspEHTable = true; } if (JitConfig.JitDebugDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspDebugInfo = true; } } } if (opts.disAsm && JitConfig.JitDisasmWithGC()) { opts.disasmWithGC = true; } #ifdef LATE_DISASM if (JitConfig.JitLateDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) opts.doLateDisasm = true; #endif // LATE_DISASM // This one applies to both Ngen/Jit Disasm output: COMPlus_JitDiffableDasm=1 if (JitConfig.DiffableDasm() != 0) { opts.disDiffable = true; opts.dspDiffable = true; } // This one applies to both Ngen/Jit Disasm output: COMPlus_JitDasmWithAddress=1 if (JitConfig.JitDasmWithAddress() != 0) { opts.disAddr = true; } if (JitConfig.JitDasmWithAlignmentBoundaries() != 0) { opts.disAlignment = true; } if (JitConfig.JitLongAddress() != 0) { opts.compLongAddress = true; } if (JitConfig.JitOptRepeat().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.optRepeat = true; } } if (verboseDump) { opts.dspCode 
= true; opts.dspEHTable = true; opts.dspGCtbls = true; opts.disAsm2 = true; opts.dspUnwind = true; verbose = true; verboseTrees = shouldUseVerboseTrees(); verboseSsa = shouldUseVerboseSsa(); codeGen->setVerbose(true); } treesBeforeAfterMorph = (JitConfig.TreesBeforeAfterMorph() == 1); morphNum = 0; // Initialize the morphed-trees counting. expensiveDebugCheckLevel = JitConfig.JitExpensiveDebugCheckLevel(); if (expensiveDebugCheckLevel == 0) { // If we're in a stress mode that modifies the flowgraph, make 1 the default. if (fgStressBBProf() || compStressCompile(STRESS_DO_WHILE_LOOPS, 30)) { expensiveDebugCheckLevel = 1; } } if (verbose) { printf("****** START compiling %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash()); printf("Generating code for %s %s\n", Target::g_tgtPlatformName(), Target::g_tgtCPUName); printf(""); // in our logic this causes a flush } if (JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { assert(!"JitBreak reached"); } unsigned jitHashBreakVal = (unsigned)JitConfig.JitHashBreak(); if ((jitHashBreakVal != (DWORD)-1) && (jitHashBreakVal == info.compMethodHash())) { assert(!"JitHashBreak reached"); } if (verbose || JitConfig.JitDebugBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args) || JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { compDebugBreak = true; } memset(compActiveStressModes, 0, sizeof(compActiveStressModes)); // Read function list, if not already read, and there exists such a list. if (!s_pJitFunctionFileInitialized) { const WCHAR* functionFileName = JitConfig.JitFunctionFile(); if (functionFileName != nullptr) { s_pJitMethodSet = new (HostAllocator::getHostAllocator()) MethodSet(functionFileName, HostAllocator::getHostAllocator()); } s_pJitFunctionFileInitialized = true; } #endif // DEBUG //------------------------------------------------------------------------- #ifdef DEBUG assert(!codeGen->isGCTypeFixed()); opts.compGcChecks = (JitConfig.JitGCChecks() != 0) || compStressCompile(STRESS_GENERIC_VARN, 5); #endif #if defined(DEBUG) && defined(TARGET_XARCH) enum { STACK_CHECK_ON_RETURN = 0x1, STACK_CHECK_ON_CALL = 0x2, STACK_CHECK_ALL = 0x3 }; DWORD dwJitStackChecks = JitConfig.JitStackChecks(); if (compStressCompile(STRESS_GENERIC_VARN, 5)) { dwJitStackChecks = STACK_CHECK_ALL; } opts.compStackCheckOnRet = (dwJitStackChecks & DWORD(STACK_CHECK_ON_RETURN)) != 0; #if defined(TARGET_X86) opts.compStackCheckOnCall = (dwJitStackChecks & DWORD(STACK_CHECK_ON_CALL)) != 0; #endif // defined(TARGET_X86) #endif // defined(DEBUG) && defined(TARGET_XARCH) #if MEASURE_MEM_ALLOC s_dspMemStats = (JitConfig.DisplayMemStats() != 0); #endif #ifdef PROFILING_SUPPORTED opts.compNoPInvokeInlineCB = jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_NO_PINVOKE_INLINE); // Cache the profiler handle if (jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)) { bool hookNeeded; bool indirected; info.compCompHnd->GetProfilingHandle(&hookNeeded, &compProfilerMethHnd, &indirected); compProfilerHookNeeded = !!hookNeeded; compProfilerMethHndIndirected = !!indirected; } else { compProfilerHookNeeded = false; compProfilerMethHnd = nullptr; compProfilerMethHndIndirected = false; } // Honour COMPlus_JitELTHookEnabled or STRESS_PROFILER_CALLBACKS stress mode // only if VM has not asked us to generate profiler hooks in the first place. // That is, override VM only if it hasn't asked for a profiler callback for this method. 
// Don't run this stress mode when pre-JITing, as we would need to emit a relocation // for the call to the fake ELT hook, which wouldn't make sense, as we can't store that // in the pre-JIT image. if (!compProfilerHookNeeded) { if ((JitConfig.JitELTHookEnabled() != 0) || (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && compStressCompile(STRESS_PROFILER_CALLBACKS, 5))) { opts.compJitELTHookEnabled = true; } } // TBD: Exclude PInvoke stubs if (opts.compJitELTHookEnabled) { compProfilerMethHnd = (void*)DummyProfilerELTStub; compProfilerMethHndIndirected = false; } #endif // PROFILING_SUPPORTED #if FEATURE_TAILCALL_OPT const WCHAR* strTailCallOpt = JitConfig.TailCallOpt(); if (strTailCallOpt != nullptr) { opts.compTailCallOpt = (UINT)_wtoi(strTailCallOpt) != 0; } if (JitConfig.TailCallLoopOpt() == 0) { opts.compTailCallLoopOpt = false; } #endif #if FEATURE_FASTTAILCALL if (JitConfig.FastTailCalls() == 0) { opts.compFastTailCalls = false; } #endif // FEATURE_FASTTAILCALL #ifdef CONFIGURABLE_ARM_ABI opts.compUseSoftFP = jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI); unsigned int softFPConfig = opts.compUseSoftFP ? 2 : 1; unsigned int oldSoftFPConfig = InterlockedCompareExchange(&GlobalJitOptions::compUseSoftFPConfigured, softFPConfig, 0); if (oldSoftFPConfig != softFPConfig && oldSoftFPConfig != 0) { // There are no current scenarios where the abi can change during the lifetime of a process // that uses the JIT. If such a change occurs, either compFeatureHfa will need to change to a TLS static // or we will need to have some means to reset the flag safely. NO_WAY("SoftFP ABI setting changed during lifetime of process"); } GlobalJitOptions::compFeatureHfa = !opts.compUseSoftFP; #elif defined(ARM_SOFTFP) && defined(TARGET_ARM) // Armel is unconditionally enabled in the JIT. Verify that the VM side agrees. assert(jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI)); #elif defined(TARGET_ARM) assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI)); #endif // CONFIGURABLE_ARM_ABI opts.compScopeInfo = opts.compDbgInfo; #ifdef LATE_DISASM codeGen->getDisAssembler().disOpenForLateDisAsm(info.compMethodName, info.compClassName, info.compMethodInfo->args.pSig); #endif //------------------------------------------------------------------------- opts.compReloc = jitFlags->IsSet(JitFlags::JIT_FLAG_RELOC); #ifdef DEBUG #if defined(TARGET_XARCH) // Whether encoding of absolute addr as PC-rel offset is enabled opts.compEnablePCRelAddr = (JitConfig.EnablePCRelAddr() != 0); #endif #endif // DEBUG opts.compProcedureSplitting = jitFlags->IsSet(JitFlags::JIT_FLAG_PROCSPLIT); #ifdef TARGET_ARM64 // TODO-ARM64-NYI: enable hot/cold splitting opts.compProcedureSplitting = false; #endif // TARGET_ARM64 #ifdef DEBUG opts.compProcedureSplittingEH = opts.compProcedureSplitting; #endif // DEBUG if (opts.compProcedureSplitting) { // Note that opts.compdbgCode is true under ngen for checked assemblies! opts.compProcedureSplitting = !opts.compDbgCode; #ifdef DEBUG // JitForceProcedureSplitting is used to force procedure splitting on checked assemblies. // This is useful for debugging on a checked build. Note that we still only do procedure // splitting in the zapper. if (JitConfig.JitForceProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplitting = true; } // JitNoProcedureSplitting will always disable procedure splitting. 
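// Sketch of the publish-once idiom used above for CONFIGURABLE_ARM_ABI
// (illustrative): the first compilation publishes its soft-FP selection
// (1 = hard FP, 2 = soft FP) via InterlockedCompareExchange against the
// initial value 0; every later compilation reads back the published value
// and must agree with it, otherwise we fail with NO_WAY because the ABI
// cannot change during the lifetime of the process.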
if (JitConfig.JitNoProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplitting = false; } // // JitNoProcedureSplittingEH will disable procedure splitting in functions with EH. if (JitConfig.JitNoProcedureSplittingEH().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplittingEH = false; } #endif } #ifdef DEBUG // Now, set compMaxUncheckedOffsetForNullObject for STRESS_NULL_OBJECT_CHECK if (compStressCompile(STRESS_NULL_OBJECT_CHECK, 30)) { compMaxUncheckedOffsetForNullObject = (size_t)JitConfig.JitMaxUncheckedOffset(); if (verbose) { printf("STRESS_NULL_OBJECT_CHECK: compMaxUncheckedOffsetForNullObject=0x%X\n", compMaxUncheckedOffsetForNullObject); } } if (verbose) { // If we are compiling for a specific tier, make that very obvious in the output. // Note that we don't expect multiple TIER flags to be set at one time, but there // is nothing preventing that. if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { printf("OPTIONS: Tier-0 compilation (set COMPlus_TieredCompilation=0 to disable)\n"); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1)) { printf("OPTIONS: Tier-1 compilation\n"); } if (compSwitchedToOptimized) { printf("OPTIONS: Tier-0 compilation, switched to FullOpts\n"); } if (compSwitchedToMinOpts) { printf("OPTIONS: Tier-1/FullOpts compilation, switched to MinOpts\n"); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { printf("OPTIONS: OSR variant with entry point 0x%x\n", info.compILEntry); } printf("OPTIONS: compCodeOpt = %s\n", (opts.compCodeOpt == BLENDED_CODE) ? "BLENDED_CODE" : (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE" : (opts.compCodeOpt == FAST_CODE) ? "FAST_CODE" : "UNKNOWN_CODE"); printf("OPTIONS: compDbgCode = %s\n", dspBool(opts.compDbgCode)); printf("OPTIONS: compDbgInfo = %s\n", dspBool(opts.compDbgInfo)); printf("OPTIONS: compDbgEnC = %s\n", dspBool(opts.compDbgEnC)); printf("OPTIONS: compProcedureSplitting = %s\n", dspBool(opts.compProcedureSplitting)); printf("OPTIONS: compProcedureSplittingEH = %s\n", dspBool(opts.compProcedureSplittingEH)); if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) && fgHaveProfileData()) { printf("OPTIONS: optimized using %s profile data\n", pgoSourceToString(fgPgoSource)); } if (fgPgoFailReason != nullptr) { printf("OPTIONS: %s\n", fgPgoFailReason); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { printf("OPTIONS: Jit invoked for ngen\n"); } } #endif #ifdef PROFILING_SUPPORTED #ifdef UNIX_AMD64_ABI if (compIsProfilerHookNeeded()) { opts.compNeedToAlignFrame = true; } #endif // UNIX_AMD64_ABI #endif #if defined(DEBUG) && defined(TARGET_ARM64) if ((s_pJitMethodSet == nullptr) || s_pJitMethodSet->IsActiveMethod(info.compFullName, info.compMethodHash())) { opts.compJitSaveFpLrWithCalleeSavedRegisters = JitConfig.JitSaveFpLrWithCalleeSavedRegisters(); } #endif // defined(DEBUG) && defined(TARGET_ARM64) } #ifdef DEBUG bool Compiler::compJitHaltMethod() { /* This method returns true when we use an INS_BREAKPOINT to allow us to step into the generated native code */ /* Note that this these two "Jit" environment variables also work for ngen images */ if (JitConfig.JitHalt().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } /* Use this Hash variant when there are a lot of method with the same name and different signatures */ unsigned fJitHashHaltVal = (unsigned)JitConfig.JitHashHalt(); if ((fJitHashHaltVal != (unsigned)-1) && (fJitHashHaltVal == info.compMethodHash())) { return 
true; } return false; } /***************************************************************************** * Should we use a "stress-mode" for the given stressArea. We have different * areas to allow the areas to be mixed in different combinations in * different methods. * 'weight' indicates how often (as a percentage) the area should be stressed. * It should reflect the usefulness:overhead ratio. */ const LPCWSTR Compiler::s_compStressModeNames[STRESS_COUNT + 1] = { #define STRESS_MODE(mode) W("STRESS_") W(#mode), STRESS_MODES #undef STRESS_MODE }; //------------------------------------------------------------------------ // compStressCompile: determine if a stress mode should be enabled // // Arguments: // stressArea - stress mode to possibly enable // weight - percent of time this mode should be turned on // (range 0 to 100); weight 0 effectively disables // // Returns: // true if this stress mode is enabled // // Notes: // Methods may be excluded from stress via name or hash. // // Particular stress modes may be disabled or forcibly enabled. // // With JitStress=2, some stress modes are enabled regardless of weight; // these modes are the ones after COUNT_VARN in the enumeration. // // For other modes or for nonzero JitStress values, stress will be // enabled selectively for roughly weight% of methods. // bool Compiler::compStressCompile(compStressArea stressArea, unsigned weight) { // This can be called early, before info is fully set up. if ((info.compMethodName == nullptr) || (info.compFullName == nullptr)) { return false; } // Inlinees defer to the root method for stress, so that we can // more easily isolate methods that cause stress failures. if (compIsForInlining()) { return impInlineRoot()->compStressCompile(stressArea, weight); } const bool doStress = compStressCompileHelper(stressArea, weight); if (doStress && !compActiveStressModes[stressArea]) { if (verbose) { printf("\n\n*** JitStress: %ws ***\n\n", s_compStressModeNames[stressArea]); } compActiveStressModes[stressArea] = 1; } return doStress; } //------------------------------------------------------------------------ // compStressCompileHelper: helper to determine if a stress mode should be enabled // // Arguments: // stressArea - stress mode to possibly enable // weight - percent of time this mode should be turned on // (range 0 to 100); weight 0 effectively disables // // Returns: // true if this stress mode is enabled // // Notes: // See compStressCompile // bool Compiler::compStressCompileHelper(compStressArea stressArea, unsigned weight) { if (!bRangeAllowStress) { return false; } if (!JitConfig.JitStressOnly().isEmpty() && !JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return false; } // Does user explicitly prevent using this STRESS_MODE through the command line? const WCHAR* strStressModeNamesNot = JitConfig.JitStressModeNamesNot(); if ((strStressModeNamesNot != nullptr) && (wcsstr(strStressModeNamesNot, s_compStressModeNames[stressArea]) != nullptr)) { return false; } // Does user explicitly set this STRESS_MODE through the command line? const WCHAR* strStressModeNames = JitConfig.JitStressModeNames(); if (strStressModeNames != nullptr) { if (wcsstr(strStressModeNames, s_compStressModeNames[stressArea]) != nullptr) { return true; } // This stress mode name did not match anything in the stress // mode allowlist. If user has requested only enable mode, // don't allow this stress mode to turn on. 
const bool onlyEnableMode = JitConfig.JitStressModeNamesOnly() != 0; if (onlyEnableMode) { return false; } } // 0: No stress (Except when explicitly set in complus_JitStressModeNames) // !=2: Vary stress. Performance will be slightly/moderately degraded // 2: Check-all stress. Performance will be REALLY horrible const int stressLevel = getJitStressLevel(); assert(weight <= MAX_STRESS_WEIGHT); // Check for boundary conditions if (stressLevel == 0 || weight == 0) { return false; } // Should we allow unlimited stress ? if ((stressArea > STRESS_COUNT_VARN) && (stressLevel == 2)) { return true; } if (weight == MAX_STRESS_WEIGHT) { return true; } // Get a hash which can be compared with 'weight' assert(stressArea != 0); const unsigned hash = (info.compMethodHash() ^ stressArea ^ stressLevel) % MAX_STRESS_WEIGHT; assert(hash < MAX_STRESS_WEIGHT && weight <= MAX_STRESS_WEIGHT); return (hash < weight); } //------------------------------------------------------------------------ // compPromoteFewerStructs: helper to determine if the local // should not be promoted under a stress mode. // // Arguments: // lclNum - local number to test // // Returns: // true if this local should not be promoted. // // Notes: // Reject ~50% of the potential promotions if STRESS_PROMOTE_FEWER_STRUCTS is active. // bool Compiler::compPromoteFewerStructs(unsigned lclNum) { bool rejectThisPromo = false; const bool promoteLess = compStressCompile(STRESS_PROMOTE_FEWER_STRUCTS, 50); if (promoteLess) { rejectThisPromo = (((info.compMethodHash() ^ lclNum) & 1) == 0); } return rejectThisPromo; } #endif // DEBUG void Compiler::compInitDebuggingInfo() { #ifdef DEBUG if (verbose) { printf("*************** In compInitDebuggingInfo() for %s\n", info.compFullName); } #endif /*------------------------------------------------------------------------- * * Get hold of the local variable records, if there are any */ info.compVarScopesCount = 0; if (opts.compScopeInfo) { eeGetVars(); } compInitVarScopeMap(); if (opts.compScopeInfo || opts.compDbgCode) { compInitScopeLists(); } if (opts.compDbgCode && (info.compVarScopesCount > 0)) { /* Create a new empty basic block. fgExtendDbgLifetimes() may add initialization of variables which are in scope right from the start of the (real) first BB (and therefore artificially marked as alive) into this block. */ fgEnsureFirstBBisScratch(); fgNewStmtAtEnd(fgFirstBB, gtNewNothingNode()); JITDUMP("Debuggable code - Add new %s to perform initialization of variables\n", fgFirstBB->dspToString()); } /*------------------------------------------------------------------------- * * Read the stmt-offsets table and the line-number table */ info.compStmtOffsetsImplicit = ICorDebugInfo::NO_BOUNDARIES; // We can only report debug info for EnC at places where the stack is empty. // Actually, at places where there are not live temps. Else, we won't be able // to map between the old and the new versions correctly as we won't have // any info for the live temps. 
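// Worked example for the stress-weight hashing above (illustrative sketch,
// treating MAX_STRESS_WEIGHT as 100 since weights are documented as
// percentages): a method whose (hash ^ area ^ level) value reduces to 37
// modulo 100 is stressed exactly for requests with weight > 37, so roughly
// `weight` percent of methods see any given stress mode. The same trick is
// used by compPromoteFewerStructs, where the low bit of (methodHash ^ lclNum)
// rejects about half of the candidate struct promotions.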
assert(!opts.compDbgEnC || !opts.compDbgInfo || 0 == (info.compStmtOffsetsImplicit & ~ICorDebugInfo::STACK_EMPTY_BOUNDARIES)); info.compStmtOffsetsCount = 0; if (opts.compDbgInfo) { /* Get hold of the line# records, if there are any */ eeGetStmtOffsets(); #ifdef DEBUG if (verbose) { printf("info.compStmtOffsetsCount = %d\n", info.compStmtOffsetsCount); printf("info.compStmtOffsetsImplicit = %04Xh", info.compStmtOffsetsImplicit); if (info.compStmtOffsetsImplicit) { printf(" ( "); if (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) { printf("STACK_EMPTY "); } if (info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) { printf("NOP "); } if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { printf("CALL_SITE "); } printf(")"); } printf("\n"); IL_OFFSET* pOffs = info.compStmtOffsets; for (unsigned i = 0; i < info.compStmtOffsetsCount; i++, pOffs++) { printf("%02d) IL_%04Xh\n", i, *pOffs); } } #endif } } void Compiler::compSetOptimizationLevel() { bool theMinOptsValue; #pragma warning(suppress : 4101) unsigned jitMinOpts; if (compIsForInlining()) { theMinOptsValue = impInlineInfo->InlinerCompiler->opts.MinOpts(); goto _SetMinOpts; } theMinOptsValue = false; if (opts.compFlags == CLFLG_MINOPT) { JITLOG((LL_INFO100, "CLFLG_MINOPT set for method %s\n", info.compFullName)); theMinOptsValue = true; } #ifdef DEBUG jitMinOpts = JitConfig.JitMinOpts(); if (!theMinOptsValue && (jitMinOpts > 0)) { // jitTotalMethodCompiled does not include the method that is being compiled now, so make +1. unsigned methodCount = Compiler::jitTotalMethodCompiled + 1; unsigned methodCountMask = methodCount & 0xFFF; unsigned kind = (jitMinOpts & 0xF000000) >> 24; switch (kind) { default: if (jitMinOpts <= methodCount) { if (verbose) { printf(" Optimizations disabled by JitMinOpts and methodCount\n"); } theMinOptsValue = true; } break; case 0xD: { unsigned firstMinopts = (jitMinOpts >> 12) & 0xFFF; unsigned secondMinopts = (jitMinOpts >> 0) & 0xFFF; if ((firstMinopts == methodCountMask) || (secondMinopts == methodCountMask)) { if (verbose) { printf("0xD: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; case 0xE: { unsigned startMinopts = (jitMinOpts >> 12) & 0xFFF; unsigned endMinopts = (jitMinOpts >> 0) & 0xFFF; if ((startMinopts <= methodCountMask) && (endMinopts >= methodCountMask)) { if (verbose) { printf("0xE: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; case 0xF: { unsigned bitsZero = (jitMinOpts >> 12) & 0xFFF; unsigned bitsOne = (jitMinOpts >> 0) & 0xFFF; if (((methodCountMask & bitsOne) == bitsOne) && ((~methodCountMask & bitsZero) == bitsZero)) { if (verbose) { printf("0xF: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; } } if (!theMinOptsValue) { if (JitConfig.JitMinOptsName().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { theMinOptsValue = true; } } #if 0 // The code in this #if can be used to debug optimization issues according to method hash. // To use, uncomment, rebuild and set environment variables minoptshashlo and minoptshashhi. 
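// Worked examples for the JitMinOpts encodings handled in the switch above
// (illustrative sketch; the kind nibble lives in bits 24-27):
//
//    0xD00200A : force MinOpts for methods whose (methodCount & 0xFFF) is
//                exactly 0x002 or 0x00A
//    0xE010020 : force MinOpts for methods 0x010 through 0x020 inclusive
//    0xF001002 : force MinOpts when bit 0x002 is set and bit 0x001 is clear
//                in (methodCount & 0xFFF)
//    other N   : force MinOpts once the method count, including the current
//                method, reaches N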
#ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("minoptshashlo"); unsigned methHashLo = 0; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); char* histr = getenv("minoptshashhi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); if (methHash >= methHashLo && methHash <= methHashHi) { printf("MinOpts for method %s, hash = %08x.\n", info.compFullName, methHash); printf(""); // in our logic this causes a flush theMinOptsValue = true; } } } #endif #endif if (compStressCompile(STRESS_MIN_OPTS, 5)) { theMinOptsValue = true; } // For PREJIT we never drop down to MinOpts // unless unless CLFLG_MINOPT is set else if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if ((unsigned)JitConfig.JitMinOptsCodeSize() < info.compILCodeSize) { JITLOG((LL_INFO10, "IL Code Size exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsInstrCount() < opts.instrCount) { JITLOG((LL_INFO10, "IL instruction count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsBbCount() < fgBBcount) { JITLOG((LL_INFO10, "Basic Block count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvNumCount() < lvaCount) { JITLOG((LL_INFO10, "Local Variable Num count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvRefCount() < opts.lvRefCount) { JITLOG((LL_INFO10, "Local Variable Ref count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } if (theMinOptsValue == true) { JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count " "%3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); if (JitConfig.JitBreakOnMinOpts() != 0) { assert(!"MinOpts enabled"); } } } #else // !DEBUG // Retail check if we should force Minopts due to the complexity of the method // For PREJIT we never drop down to MinOpts // unless unless CLFLG_MINOPT is set if (!theMinOptsValue && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((DEFAULT_MIN_OPTS_CODE_SIZE < info.compILCodeSize) || (DEFAULT_MIN_OPTS_INSTR_COUNT < opts.instrCount) || (DEFAULT_MIN_OPTS_BB_COUNT < fgBBcount) || (DEFAULT_MIN_OPTS_LV_NUM_COUNT < lvaCount) || (DEFAULT_MIN_OPTS_LV_REF_COUNT < opts.lvRefCount))) { theMinOptsValue = true; } #endif // DEBUG JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count %3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); #if 0 // The code in this #if has been useful in debugging loop cloning issues, by // enabling selective enablement of the loop cloning optimization according to // method hash. #ifdef DEBUG if (!theMinOptsValue) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("opthashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); // methHashLo = (unsigned(atoi(lostr)) << 2); // So we don't have to use negative numbers. } char* histr = getenv("opthashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); // methHashHi = (unsigned(atoi(histr)) << 2); // So we don't have to use negative numbers. 
} if (methHash < methHashLo || methHash > methHashHi) { theMinOptsValue = true; } else { printf("Doing optimization in in %s (0x%x).\n", info.compFullName, methHash); } } #endif #endif _SetMinOpts: // Set the MinOpts value opts.SetMinOpts(theMinOptsValue); // Notify the VM if MinOpts is being used when not requested if (theMinOptsValue && !compIsForInlining() && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) && !opts.compDbgCode) { info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_SWITCHED_TO_MIN_OPT); opts.jitFlags->Clear(JitFlags::JIT_FLAG_TIER1); compSwitchedToMinOpts = true; } #ifdef DEBUG if (verbose && !compIsForInlining()) { printf("OPTIONS: opts.MinOpts() == %s\n", opts.MinOpts() ? "true" : "false"); } #endif /* Control the optimizations */ if (opts.OptimizationDisabled()) { opts.compFlags &= ~CLFLG_MAXOPT; opts.compFlags |= CLFLG_MINOPT; } if (!compIsForInlining()) { codeGen->setFramePointerRequired(false); codeGen->setFrameRequired(false); if (opts.OptimizationDisabled()) { codeGen->setFrameRequired(true); } #if !defined(TARGET_AMD64) // The VM sets JitFlags::JIT_FLAG_FRAMED for two reasons: (1) the COMPlus_JitFramed variable is set, or // (2) the function is marked "noinline". The reason for #2 is that people mark functions // noinline to ensure the show up on in a stack walk. But for AMD64, we don't need a frame // pointer for the frame to show up in stack walk. if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_FRAMED)) codeGen->setFrameRequired(true); #endif if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // The JIT doesn't currently support loop alignment for prejitted images. // (The JIT doesn't know the final address of the code, hence // it can't align code based on unknown addresses.) codeGen->SetAlignLoops(false); // loop alignment not supported for prejitted code } else { codeGen->SetAlignLoops(JitConfig.JitAlignLoops() == 1); } } #if TARGET_ARM // A single JitStress=1 Linux ARM32 test fails when we expand virtual calls early // JIT\HardwareIntrinsics\General\Vector128_1\Vector128_1_ro // opts.compExpandCallsEarly = (JitConfig.JitExpandCallsEarly() == 2); #else opts.compExpandCallsEarly = (JitConfig.JitExpandCallsEarly() != 0); #endif fgCanRelocateEHRegions = true; } #ifdef TARGET_ARMARCH // Function compRsvdRegCheck: // given a curState to use for calculating the total frame size // it will return true if the REG_OPT_RSVD should be reserved so // that it can be use to form large offsets when accessing stack // based LclVar including both incoming and out going argument areas. // // The method advances the frame layout state to curState by calling // lvaFrameSize(curState). // bool Compiler::compRsvdRegCheck(FrameLayoutState curState) { // Always do the layout even if returning early. Callers might // depend on us to do the layout. unsigned frameSize = lvaFrameSize(curState); JITDUMP("\n" "compRsvdRegCheck\n" " frame size = %6d\n" " compArgSize = %6d\n", frameSize, compArgSize); if (opts.MinOpts()) { // Have a recovery path in case we fail to reserve REG_OPT_RSVD and go // over the limit of SP and FP offset ranges due to large // temps. JITDUMP(" Returning true (MinOpts)\n\n"); return true; } unsigned calleeSavedRegMaxSz = CALLEE_SAVED_REG_MAXSZ; if (compFloatingPointUsed) { calleeSavedRegMaxSz += CALLEE_SAVED_FLOAT_MAXSZ; } calleeSavedRegMaxSz += REGSIZE_BYTES; // we always push LR. 
See genPushCalleeSavedRegisters noway_assert(frameSize >= calleeSavedRegMaxSz); #if defined(TARGET_ARM64) // TODO-ARM64-CQ: update this! JITDUMP(" Returning true (ARM64)\n\n"); return true; // just always assume we'll need it, for now #else // TARGET_ARM // frame layout: // // ... high addresses ... // frame contents size // ------------------- ------------------------ // inArgs compArgSize (includes prespill) // caller SP ---> // prespill // LR REGSIZE_BYTES // R11 ---> R11 REGSIZE_BYTES // callee saved regs CALLEE_SAVED_REG_MAXSZ (32 bytes) // optional saved fp regs CALLEE_SAVED_FLOAT_MAXSZ (64 bytes) // lclSize // incl. TEMPS MAX_SPILL_TEMP_SIZE // incl. outArgs // SP ---> // ... low addresses ... // // When codeGen->isFramePointerRequired is true, R11 will be established as a frame pointer. // We can then use R11 to access incoming args with positive offsets, and LclVars with // negative offsets. // // In functions with EH, in the non-funclet (or main) region, even though we will have a // frame pointer, we can use SP with positive offsets to access any or all locals or arguments // that we can reach with SP-relative encodings. The funclet region might require the reserved // register, since it must use offsets from R11 to access the parent frame. unsigned maxR11PositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; JITDUMP(" maxR11PositiveEncodingOffset = %6d\n", maxR11PositiveEncodingOffset); // Floating point load/store instructions (VLDR/VSTR) can address up to -0x3FC from R11, but we // don't know if there are either no integer locals, or if we don't need large negative offsets // for the integer locals, so we must use the integer max negative offset, which is a // smaller (absolute value) number. unsigned maxR11NegativeEncodingOffset = 0x00FF; // This is a negative offset from R11. JITDUMP(" maxR11NegativeEncodingOffset = %6d\n", maxR11NegativeEncodingOffset); // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. unsigned maxR11PositiveOffset = compArgSize + (2 * REGSIZE_BYTES) - 1; JITDUMP(" maxR11PositiveOffset = %6d\n", maxR11PositiveOffset); // The value is positive, but represents a negative offset from R11. // frameSize includes callee-saved space for R11 and LR, which are at non-negative offsets from R11 // (+0 and +4, respectively), so don't include those in the max possible negative offset. assert(frameSize >= (2 * REGSIZE_BYTES)); unsigned maxR11NegativeOffset = frameSize - (2 * REGSIZE_BYTES); JITDUMP(" maxR11NegativeOffset = %6d\n", maxR11NegativeOffset); if (codeGen->isFramePointerRequired()) { if (maxR11NegativeOffset > maxR11NegativeEncodingOffset) { JITDUMP(" Returning true (frame required and maxR11NegativeOffset)\n\n"); return true; } if (maxR11PositiveOffset > maxR11PositiveEncodingOffset) { JITDUMP(" Returning true (frame required and maxR11PositiveOffset)\n\n"); return true; } } // Now consider the SP based frame case. Note that we will use SP based offsets to access the stack in R11 based // frames in the non-funclet main code area. unsigned maxSPPositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; JITDUMP(" maxSPPositiveEncodingOffset = %6d\n", maxSPPositiveEncodingOffset); // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. 
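    // Illustrative example (hypothetical numbers, not taken from any particular method): with
    // compArgSize = 64 and frameSize = 0x800, the farthest SP-relative access computed below is
    // 64 + 0x800 - 1 = 0x83F. In the SP-frame case that exceeds the 0x03FC limit used when floating
    // point is in play, but still fits the 0x0FFF integer limit, so whether REG_OPT_RSVD gets
    // reserved here can hinge on compFloatingPointUsed alone.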
assert(compArgSize + frameSize > 0); unsigned maxSPPositiveOffset = compArgSize + frameSize - 1; if (codeGen->isFramePointerUsed()) { // We have a frame pointer, so we can use it to access part of the stack, even if SP can't reach those parts. // We will still generate SP-relative offsets if SP can reach. // First, check that the stack between R11 and SP can be fully reached, either via negative offset from FP // or positive offset from SP. Don't count stored R11 or LR, which are reached from positive offsets from FP. unsigned maxSPLocalsCombinedOffset = frameSize - (2 * REGSIZE_BYTES) - 1; JITDUMP(" maxSPLocalsCombinedOffset = %6d\n", maxSPLocalsCombinedOffset); if (maxSPLocalsCombinedOffset > maxSPPositiveEncodingOffset) { // Can R11 help? unsigned maxRemainingLocalsCombinedOffset = maxSPLocalsCombinedOffset - maxSPPositiveEncodingOffset; JITDUMP(" maxRemainingLocalsCombinedOffset = %6d\n", maxRemainingLocalsCombinedOffset); if (maxRemainingLocalsCombinedOffset > maxR11NegativeEncodingOffset) { JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach entire stack between them)\n\n"); return true; } // Otherwise, yes, we can address the remaining parts of the locals frame with negative offsets from R11. } // Check whether either R11 or SP can access the arguments. if ((maxR11PositiveOffset > maxR11PositiveEncodingOffset) && (maxSPPositiveOffset > maxSPPositiveEncodingOffset)) { JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach all arguments)\n\n"); return true; } } else { if (maxSPPositiveOffset > maxSPPositiveEncodingOffset) { JITDUMP(" Returning true (no frame pointer exists; SP can't reach all of frame)\n\n"); return true; } } // We won't need to reserve REG_OPT_RSVD. // JITDUMP(" Returning false\n\n"); return false; #endif // TARGET_ARM } #endif // TARGET_ARMARCH //------------------------------------------------------------------------ // compGetTieringName: get a string describing tiered compilation settings // for this method // // Arguments: // wantShortName - true if a short name is ok (say for using in file names) // // Returns: // String describing tiering decisions for this method, including cases // where the jit codegen will differ from what the runtime requested. // const char* Compiler::compGetTieringName(bool wantShortName) const { const bool tier0 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0); const bool tier1 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1); assert(!tier0 || !tier1); // We don't expect multiple TIER flags to be set at one time. if (tier0) { return "Tier0"; } else if (tier1) { if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { return "Tier1-OSR"; } else { return "Tier1"; } } else if (opts.OptimizationEnabled()) { if (compSwitchedToOptimized) { return wantShortName ? "Tier0-FullOpts" : "Tier-0 switched to FullOpts"; } else { return "FullOpts"; } } else if (opts.MinOpts()) { if (compSwitchedToMinOpts) { if (compSwitchedToOptimized) { return wantShortName ? "Tier0-FullOpts-MinOpts" : "Tier-0 switched to FullOpts, then to MinOpts"; } else { return wantShortName ? "Tier0-MinOpts" : "Tier-0 switched MinOpts"; } } else { return "MinOpts"; } } else if (opts.compDbgCode) { return "Debug"; } else { return wantShortName ? 
"Unknown" : "Unknown optimization level"; } } //------------------------------------------------------------------------ // compGetStressMessage: get a string describing jitstress capability // for this method // // Returns: // An empty string if stress is not enabled, else a string describing // if this method is subject to stress or is excluded by name or hash. // const char* Compiler::compGetStressMessage() const { // Add note about stress where appropriate const char* stressMessage = ""; #ifdef DEBUG // Is stress enabled via mode name or level? if ((JitConfig.JitStressModeNames() != nullptr) || (getJitStressLevel() > 0)) { // Is the method being jitted excluded from stress via range? if (bRangeAllowStress) { // Or is it excluded via name? if (!JitConfig.JitStressOnly().isEmpty() || !JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { // Not excluded -- stress can happen stressMessage = " JitStress"; } else { stressMessage = " NoJitStress(Only)"; } } else { stressMessage = " NoJitStress(Range)"; } } #endif // DEBUG return stressMessage; } void Compiler::compFunctionTraceStart() { #ifdef DEBUG if (compIsForInlining()) { return; } if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) { LONG newJitNestingLevel = InterlockedIncrement(&Compiler::jitNestingLevel); if (newJitNestingLevel <= 0) { printf("{ Illegal nesting level %d }\n", newJitNestingLevel); } for (LONG i = 0; i < newJitNestingLevel - 1; i++) { printf(" "); } printf("{ Start Jitting Method %4d %s (MethodHash=%08x) %s\n", Compiler::jitTotalMethodCompiled, info.compFullName, info.compMethodHash(), compGetTieringName()); /* } editor brace matching workaround for this printf */ } #endif // DEBUG } void Compiler::compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI) { #ifdef DEBUG assert(!compIsForInlining()); if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) { LONG newJitNestingLevel = InterlockedDecrement(&Compiler::jitNestingLevel); if (newJitNestingLevel < 0) { printf("{ Illegal nesting level %d }\n", newJitNestingLevel); } for (LONG i = 0; i < newJitNestingLevel; i++) { printf(" "); } // Note: that is incorrect if we are compiling several methods at the same time. unsigned methodNumber = Compiler::jitTotalMethodCompiled - 1; /* { editor brace-matching workaround for following printf */ printf("} Jitted Method %4d at" FMT_ADDR "method %s size %08x%s%s\n", methodNumber, DBG_ADDR(methodCodePtr), info.compFullName, methodCodeSize, isNYI ? " NYI" : (compIsForImportOnly() ? " import only" : ""), opts.altJit ? 
" altjit" : ""); } #endif // DEBUG } //------------------------------------------------------------------------ // BeginPhase: begin execution of a phase // // Arguments: // phase - the phase that is about to begin // void Compiler::BeginPhase(Phases phase) { mostRecentlyActivePhase = phase; } //------------------------------------------------------------------------ // EndPhase: finish execution of a phase // // Arguments: // phase - the phase that has just finished // void Compiler::EndPhase(Phases phase) { #if defined(FEATURE_JIT_METHOD_PERF) if (pCompJitTimer != nullptr) { pCompJitTimer->EndPhase(this, phase); } #endif mostRecentlyActivePhase = phase; } //------------------------------------------------------------------------ // compCompile: run phases needed for compilation // // Arguments: // methodCodePtr [OUT] - address of generated code // methodCodeSize [OUT] - size of the generated code (hot + cold setions) // compileFlags [IN] - flags controlling jit behavior // // Notes: // This is the most interesting 'toplevel' function in the JIT. It goes through the operations of // importing, morphing, optimizations and code generation. This is called from the EE through the // code:CILJit::compileMethod function. // // For an overview of the structure of the JIT, see: // https://github.com/dotnet/runtime/blob/main/docs/design/coreclr/jit/ryujit-overview.md // // Also called for inlinees, though they will only be run through the first few phases. // void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { // Prepare for importation // auto preImportPhase = [this]() { if (compIsForInlining()) { // Notify root instance that an inline attempt is about to import IL impInlineRoot()->m_inlineStrategy->NoteImport(); } hashBv::Init(this); VarSetOps::AssignAllowUninitRhs(this, compCurLife, VarSetOps::UninitVal()); // The temp holding the secret stub argument is used by fgImport() when importing the intrinsic. if (info.compPublishStubParam) { assert(lvaStubArgumentVar == BAD_VAR_NUM); lvaStubArgumentVar = lvaGrabTempWithImplicitUse(false DEBUGARG("stub argument")); lvaGetDesc(lvaStubArgumentVar)->lvType = TYP_I_IMPL; // TODO-CQ: there is no need to mark it as doNotEnreg. There are no stores for this local // before codegen so liveness and LSRA mark it as "liveIn" and always allocate a stack slot for it. // However, it would be better to process it like other argument locals and keep it in // a reg for the whole method without spilling to the stack when possible. lvaSetVarDoNotEnregister(lvaStubArgumentVar DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } }; DoPhase(this, PHASE_PRE_IMPORT, preImportPhase); compFunctionTraceStart(); // Incorporate profile data. // // Note: the importer is sensitive to block weights, so this has // to happen before importation. // DoPhase(this, PHASE_INCPROFILE, &Compiler::fgIncorporateProfileData); // If we're going to instrument code, we may need to prepare before // we import. // if (compileFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { DoPhase(this, PHASE_IBCPREP, &Compiler::fgPrepareToInstrumentMethod); } // Import: convert the instrs in each basic block to a tree based intermediate representation // DoPhase(this, PHASE_IMPORTATION, &Compiler::fgImport); // Expand any patchpoints // DoPhase(this, PHASE_PATCHPOINTS, &Compiler::fgTransformPatchpoints); // If instrumenting, add block and class probes. 
// if (compileFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { DoPhase(this, PHASE_IBCINSTR, &Compiler::fgInstrumentMethod); } // Transform indirect calls that require control flow expansion. // DoPhase(this, PHASE_INDXCALL, &Compiler::fgTransformIndirectCalls); // PostImportPhase: cleanup inlinees // auto postImportPhase = [this]() { // If this is a viable inline candidate if (compIsForInlining() && !compDonotInline()) { // Filter out unimported BBs in the inlinee // fgPostImportationCleanup(); // Update type of return spill temp if we have gathered // better info when importing the inlinee, and the return // spill temp is single def. if (fgNeedReturnSpillTemp()) { CORINFO_CLASS_HANDLE retExprClassHnd = impInlineInfo->retExprClassHnd; if (retExprClassHnd != nullptr) { LclVarDsc* returnSpillVarDsc = lvaGetDesc(lvaInlineeReturnSpillTemp); if (returnSpillVarDsc->lvSingleDef) { lvaUpdateClass(lvaInlineeReturnSpillTemp, retExprClassHnd, impInlineInfo->retExprClassHndIsExact); } } } } }; DoPhase(this, PHASE_POST_IMPORT, postImportPhase); // If we're importing for inlining, we're done. if (compIsForInlining()) { #ifdef FEATURE_JIT_METHOD_PERF if (pCompJitTimer != nullptr) { #if MEASURE_CLRAPI_CALLS EndPhase(PHASE_CLR_API); #endif pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, false); } #endif return; } // At this point in the phase list, all the inlinee phases have // been run, and inlinee compiles have exited, so we should only // get this far if we are jitting the root method. noway_assert(!compIsForInlining()); // Maybe the caller was not interested in generating code if (compIsForImportOnly()) { compFunctionTraceEnd(nullptr, 0, false); return; } #if !FEATURE_EH // If we aren't yet supporting EH in a compiler bring-up, remove as many EH handlers as possible, so // we can pass tests that contain try/catch EH, but don't actually throw any exceptions. fgRemoveEH(); #endif // !FEATURE_EH // We could allow ESP frames. Just need to reserve space for // pushing EBP if the method becomes an EBP-frame after an edit. // Note that requiring a EBP Frame disallows double alignment. Thus if we change this // we either have to disallow double alignment for E&C some other way or handle it in EETwain. if (opts.compDbgEnC) { codeGen->setFramePointerRequired(true); // We don't care about localloc right now. If we do support it, // EECodeManager::FixContextForEnC() needs to handle it smartly // in case the localloc was actually executed. // // compLocallocUsed = true; } // Start phases that are broadly called morphing, and includes // global morph, as well as other phases that massage the trees so // that we can generate code out of them. // auto morphInitPhase = [this]() { // Initialize the BlockSet epoch NewBasicBlockEpoch(); fgOutgoingArgTemps = nullptr; // Insert call to class constructor as the first basic block if // we were asked to do so. 
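        // That is, when initClass reports CORINFO_INITCLASS_USE_HELPER we must trigger the class
        // constructor ourselves: the cctor call built by fgInitThisClass() is prepended to a scratch
        // first block so it runs before any user IL.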
if (info.compCompHnd->initClass(nullptr /* field */, nullptr /* method */, impTokenLookupContextHandle /* context */) & CORINFO_INITCLASS_USE_HELPER) { fgEnsureFirstBBisScratch(); fgNewStmtAtBeg(fgFirstBB, fgInitThisClass()); } #ifdef DEBUG if (opts.compGcChecks) { for (unsigned i = 0; i < info.compArgsCount; i++) { if (lvaGetDesc(i)->TypeGet() == TYP_REF) { // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTree* op = gtNewLclvNode(i, TYP_REF); GenTreeCall::Use* args = gtNewCallArgs(op); op = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_VOID, args); fgEnsureFirstBBisScratch(); fgNewStmtAtEnd(fgFirstBB, op); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op); } } } } #endif // DEBUG #if defined(DEBUG) && defined(TARGET_XARCH) if (opts.compStackCheckOnRet) { lvaReturnSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("ReturnSpCheck")); lvaSetVarDoNotEnregister(lvaReturnSpCheck, DoNotEnregisterReason::ReturnSpCheck); lvaGetDesc(lvaReturnSpCheck)->lvType = TYP_I_IMPL; } #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) if (opts.compStackCheckOnCall) { lvaCallSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("CallSpCheck")); lvaGetDesc(lvaCallSpCheck)->lvType = TYP_I_IMPL; } #endif // defined(DEBUG) && defined(TARGET_X86) // Update flow graph after importation. // Removes un-imported blocks, trims EH, and ensures correct OSR entry flow. // fgPostImportationCleanup(); }; DoPhase(this, PHASE_MORPH_INIT, morphInitPhase); #ifdef DEBUG // Inliner could add basic blocks. Check that the flowgraph data is up-to-date fgDebugCheckBBlist(false, false); #endif // DEBUG // Inline callee methods into this root method // DoPhase(this, PHASE_MORPH_INLINE, &Compiler::fgInline); // Record "start" values for post-inlining cycles and elapsed time. RecordStateAtEndOfInlining(); // Transform each GT_ALLOCOBJ node into either an allocation helper call or // local variable allocation on the stack. 
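    // Object stack allocation is opt-in: it only runs when compObjectStackAllocation() is true and we
    // are optimizing; otherwise every GT_ALLOCOBJ is simply expanded to a heap allocation helper call.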
ObjectAllocator objectAllocator(this); // PHASE_ALLOCATE_OBJECTS if (compObjectStackAllocation() && opts.OptimizationEnabled()) { objectAllocator.EnableObjectStackAllocation(); } objectAllocator.Run(); // Add any internal blocks/trees we may need // DoPhase(this, PHASE_MORPH_ADD_INTERNAL, &Compiler::fgAddInternal); // Remove empty try regions // DoPhase(this, PHASE_EMPTY_TRY, &Compiler::fgRemoveEmptyTry); // Remove empty finally regions // DoPhase(this, PHASE_EMPTY_FINALLY, &Compiler::fgRemoveEmptyFinally); // Streamline chains of finally invocations // DoPhase(this, PHASE_MERGE_FINALLY_CHAINS, &Compiler::fgMergeFinallyChains); // Clone code in finallys to reduce overhead for non-exceptional paths // DoPhase(this, PHASE_CLONE_FINALLY, &Compiler::fgCloneFinally); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Update finally target flags after EH optimizations // DoPhase(this, PHASE_UPDATE_FINALLY_FLAGS, &Compiler::fgUpdateFinallyTargetFlags); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #if DEBUG if (lvaEnregEHVars) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("JitEHWTHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitEHWTHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lvaEnregEHVars = false; } else if (dump) { printf("Enregistering EH Vars for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // flush } } if (lvaEnregMultiRegVars) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("JitMultiRegHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitMultiRegHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lvaEnregMultiRegVars = false; } else if (dump) { printf("Enregistering MultiReg Vars for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // flush } } #endif // Compute bbNum, bbRefs and bbPreds // // This is the first time full (not cheap) preds will be computed. // And, if we have profile data, we can now check integrity. // // From this point on the flowgraph information such as bbNum, // bbRefs or bbPreds has to be kept updated. // auto computePredsPhase = [this]() { JITDUMP("\nRenumbering the basic blocks for fgComputePred\n"); fgRenumberBlocks(); noway_assert(!fgComputePredsDone); fgComputePreds(); }; DoPhase(this, PHASE_COMPUTE_PREDS, computePredsPhase); // Now that we have pred lists, do some flow-related optimizations // if (opts.OptimizationEnabled()) { // Merge common throw blocks // DoPhase(this, PHASE_MERGE_THROWS, &Compiler::fgTailMergeThrows); // Run an early flow graph simplification pass // auto earlyUpdateFlowGraphPhase = [this]() { constexpr bool doTailDup = false; fgUpdateFlowGraph(doTailDup); }; DoPhase(this, PHASE_EARLY_UPDATE_FLOW_GRAPH, earlyUpdateFlowGraphPhase); } // Promote struct locals // auto promoteStructsPhase = [this]() { // For x64 and ARM64 we need to mark irregular parameters lvaRefCountState = RCS_EARLY; fgResetImplicitByRefRefCount(); fgPromoteStructs(); }; DoPhase(this, PHASE_PROMOTE_STRUCTS, promoteStructsPhase); // Figure out what locals are address-taken. 
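    // The address-exposed marks recorded here feed the decisions just below, e.g. which implicit
    // byref promotions are kept versus demoted in PHASE_MORPH_IMPBYREF.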
// DoPhase(this, PHASE_STR_ADRLCL, &Compiler::fgMarkAddressExposedLocals); // Run a simple forward substitution pass. // DoPhase(this, PHASE_FWD_SUB, &Compiler::fgForwardSub); // Apply the type update to implicit byref parameters; also choose (based on address-exposed // analysis) which implicit byref promotions to keep (requires copy to initialize) or discard. // DoPhase(this, PHASE_MORPH_IMPBYREF, &Compiler::fgRetypeImplicitByRefArgs); #ifdef DEBUG // Now that locals have address-taken and implicit byref marked, we can safely apply stress. lvaStressLclFld(); fgStress64RsltMul(); #endif // DEBUG // Morph the trees in all the blocks of the method // auto morphGlobalPhase = [this]() { unsigned prevBBCount = fgBBcount; fgMorphBlocks(); // Fix any LclVar annotations on discarded struct promotion temps for implicit by-ref args fgMarkDemotedImplicitByRefArgs(); lvaRefCountState = RCS_INVALID; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (fgNeedToAddFinallyTargetBits) { // We previously wiped out the BBF_FINALLY_TARGET bits due to some morphing; add them back. fgAddFinallyTargetFlags(); fgNeedToAddFinallyTargetBits = false; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Decide the kind of code we want to generate fgSetOptions(); fgExpandQmarkNodes(); #ifdef DEBUG compCurBB = nullptr; #endif // DEBUG // If we needed to create any new BasicBlocks then renumber the blocks if (fgBBcount > prevBBCount) { fgRenumberBlocks(); } // We can now enable all phase checking activePhaseChecks = PhaseChecks::CHECK_ALL; }; DoPhase(this, PHASE_MORPH_GLOBAL, morphGlobalPhase); // GS security checks for unsafe buffers // auto gsPhase = [this]() { unsigned prevBBCount = fgBBcount; if (getNeedsGSSecurityCookie()) { gsGSChecksInitCookie(); if (compGSReorderStackLayout) { gsCopyShadowParams(); } // If we needed to create any new BasicBlocks then renumber the blocks if (fgBBcount > prevBBCount) { fgRenumberBlocks(); } } else { JITDUMP("No GS security needed\n"); } }; DoPhase(this, PHASE_GS_COOKIE, gsPhase); // Compute the block and edge weights // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS, &Compiler::fgComputeBlockAndEdgeWeights); #if defined(FEATURE_EH_FUNCLETS) // Create funclets from the EH handlers. // DoPhase(this, PHASE_CREATE_FUNCLETS, &Compiler::fgCreateFunclets); #endif // FEATURE_EH_FUNCLETS if (opts.OptimizationEnabled()) { // Invert loops // DoPhase(this, PHASE_INVERT_LOOPS, &Compiler::optInvertLoops); // Optimize block order // DoPhase(this, PHASE_OPTIMIZE_LAYOUT, &Compiler::optOptimizeLayout); // Compute reachability sets and dominators. // DoPhase(this, PHASE_COMPUTE_REACHABILITY, &Compiler::fgComputeReachability); // Scale block weights and mark run rarely blocks. // DoPhase(this, PHASE_SET_BLOCK_WEIGHTS, &Compiler::optSetBlockWeights); // Discover and classify natural loops (e.g. mark iterative loops as such). Also marks loop blocks // and sets bbWeight to the loop nesting levels. // DoPhase(this, PHASE_FIND_LOOPS, &Compiler::optFindLoopsPhase); // Clone loops with optimization opportunities, and choose one based on dynamic condition evaluation. // DoPhase(this, PHASE_CLONE_LOOPS, &Compiler::optCloneLoops); // Unroll loops // DoPhase(this, PHASE_UNROLL_LOOPS, &Compiler::optUnrollLoops); // Clear loop table info that is not used after this point, and might become invalid. 
// DoPhase(this, PHASE_CLEAR_LOOP_INFO, &Compiler::optClearLoopIterInfo); } #ifdef DEBUG fgDebugCheckLinks(); #endif // Create the variable table (and compute variable ref counts) // DoPhase(this, PHASE_MARK_LOCAL_VARS, &Compiler::lvaMarkLocalVars); // IMPORTANT, after this point, locals are ref counted. // However, ref counts are not kept incrementally up to date. assert(lvaLocalVarRefCounted()); if (opts.OptimizationEnabled()) { // Optimize boolean conditions // DoPhase(this, PHASE_OPTIMIZE_BOOLS, &Compiler::optOptimizeBools); // optOptimizeBools() might have changed the number of blocks; the dominators/reachability might be bad. } // Figure out the order in which operators are to be evaluated // DoPhase(this, PHASE_FIND_OPER_ORDER, &Compiler::fgFindOperOrder); // Weave the tree lists. Anyone who modifies the tree shapes after // this point is responsible for calling fgSetStmtSeq() to keep the // nodes properly linked. // This can create GC poll calls, and create new BasicBlocks (without updating dominators/reachability). // DoPhase(this, PHASE_SET_BLOCK_ORDER, &Compiler::fgSetBlockOrder); // At this point we know if we are fully interruptible or not if (opts.OptimizationEnabled()) { bool doSsa = true; bool doEarlyProp = true; bool doValueNum = true; bool doLoopHoisting = true; bool doCopyProp = true; bool doBranchOpt = true; bool doAssertionProp = true; bool doRangeAnalysis = true; int iterations = 1; #if defined(OPT_CONFIG) doSsa = (JitConfig.JitDoSsa() != 0); doEarlyProp = doSsa && (JitConfig.JitDoEarlyProp() != 0); doValueNum = doSsa && (JitConfig.JitDoValueNumber() != 0); doLoopHoisting = doValueNum && (JitConfig.JitDoLoopHoisting() != 0); doCopyProp = doValueNum && (JitConfig.JitDoCopyProp() != 0); doBranchOpt = doValueNum && (JitConfig.JitDoRedundantBranchOpts() != 0); doAssertionProp = doValueNum && (JitConfig.JitDoAssertionProp() != 0); doRangeAnalysis = doAssertionProp && (JitConfig.JitDoRangeAnalysis() != 0); if (opts.optRepeat) { iterations = JitConfig.JitOptRepeatCount(); } #endif // defined(OPT_CONFIG) while (iterations > 0) { if (doSsa) { // Build up SSA form for the IR // DoPhase(this, PHASE_BUILD_SSA, &Compiler::fgSsaBuild); } if (doEarlyProp) { // Propagate array length and rewrite getType() method call // DoPhase(this, PHASE_EARLY_PROP, &Compiler::optEarlyProp); } if (doValueNum) { // Value number the trees // DoPhase(this, PHASE_VALUE_NUMBER, &Compiler::fgValueNumber); } if (doLoopHoisting) { // Hoist invariant code out of loops // DoPhase(this, PHASE_HOIST_LOOP_CODE, &Compiler::optHoistLoopCode); } if (doCopyProp) { // Perform VN based copy propagation // DoPhase(this, PHASE_VN_COPY_PROP, &Compiler::optVnCopyProp); } if (doBranchOpt) { DoPhase(this, PHASE_OPTIMIZE_BRANCHES, &Compiler::optRedundantBranches); } // Remove common sub-expressions // DoPhase(this, PHASE_OPTIMIZE_VALNUM_CSES, &Compiler::optOptimizeCSEs); if (doAssertionProp) { // Assertion propagation // DoPhase(this, PHASE_ASSERTION_PROP_MAIN, &Compiler::optAssertionPropMain); } if (doRangeAnalysis) { auto rangePhase = [this]() { RangeCheck rc(this); rc.OptimizeRangeChecks(); }; // Bounds check elimination via range analysis // DoPhase(this, PHASE_OPTIMIZE_INDEX_CHECKS, rangePhase); } if (fgModified) { // update the flowgraph if we modified it during the optimization phase // auto optUpdateFlowGraphPhase = [this]() { constexpr bool doTailDup = false; fgUpdateFlowGraph(doTailDup); }; DoPhase(this, PHASE_OPT_UPDATE_FLOW_GRAPH, optUpdateFlowGraphPhase); // Recompute the edge weight if we have modified the 
flow graph // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS2, &Compiler::fgComputeEdgeWeights); } // Iterate if requested, resetting annotations first. if (--iterations == 0) { break; } ResetOptAnnotations(); RecomputeLoopInfo(); } } // Insert GC Polls DoPhase(this, PHASE_INSERT_GC_POLLS, &Compiler::fgInsertGCPolls); // Determine start of cold region if we are hot/cold splitting // DoPhase(this, PHASE_DETERMINE_FIRST_COLD_BLOCK, &Compiler::fgDetermineFirstColdBlock); #ifdef DEBUG fgDebugCheckLinks(compStressCompile(STRESS_REMORPH_TREES, 50)); // Stash the current estimate of the function's size if necessary. if (verbose) { compSizeEstimate = 0; compCycleEstimate = 0; for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { compSizeEstimate += stmt->GetCostSz(); compCycleEstimate += stmt->GetCostEx(); } } } #endif // rationalize trees Rationalizer rat(this); // PHASE_RATIONALIZE rat.Run(); // Here we do "simple lowering". When the RyuJIT backend works for all // platforms, this will be part of the more general lowering phase. For now, though, we do a separate // pass of "final lowering." We must do this before (final) liveness analysis, because this creates // range check throw blocks, in which the liveness must be correct. // DoPhase(this, PHASE_SIMPLE_LOWERING, &Compiler::fgSimpleLowering); // Enable this to gather statistical data such as // call and register argument info, flowgraph and loop info, etc. compJitStats(); #ifdef TARGET_ARM if (compLocallocUsed) { // We reserve REG_SAVED_LOCALLOC_SP to store SP on entry for stack unwinding codeGen->regSet.rsMaskResvd |= RBM_SAVED_LOCALLOC_SP; } #endif // TARGET_ARM // Assign registers to variables, etc. /////////////////////////////////////////////////////////////////////////////// // Dominator and reachability sets are no longer valid. They haven't been // maintained up to here, and shouldn't be used (unless recomputed). /////////////////////////////////////////////////////////////////////////////// fgDomsComputed = false; // Create LinearScan before Lowering, so that Lowering can call LinearScan methods // for determining whether locals are register candidates and (for xarch) whether // a node is a containable memory op. m_pLinearScan = getLinearScanAllocator(this); // Lower // m_pLowering = new (this, CMK_LSRA) Lowering(this, m_pLinearScan); // PHASE_LOWERING m_pLowering->Run(); if (!compMacOsArm64Abi()) { // Set stack levels; this information is necessary for x86 // but on other platforms it is used only in asserts. // TODO: do not run it in release on other platforms, see https://github.com/dotnet/runtime/issues/42673. StackLevelSetter stackLevelSetter(this); stackLevelSetter.Run(); } // We can not add any new tracked variables after this point. 
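    // (Informal note: liveness sets and the register allocator size their per-variable data by the
    // tracked-local count, so that count must stay fixed from this point on -- see lvaTrackedFixed.)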
lvaTrackedFixed = true; // Now that lowering is completed we can proceed to perform register allocation // auto linearScanPhase = [this]() { m_pLinearScan->doLinearScan(); }; DoPhase(this, PHASE_LINEAR_SCAN, linearScanPhase); // Copied from rpPredictRegUse() SetFullPtrRegMapRequired(codeGen->GetInterruptible() || !codeGen->isFramePointerUsed()); #if FEATURE_LOOP_ALIGN // Place loop alignment instructions DoPhase(this, PHASE_ALIGN_LOOPS, &Compiler::placeLoopAlignInstructions); #endif // Generate code codeGen->genGenerateCode(methodCodePtr, methodCodeSize); #if TRACK_LSRA_STATS if (JitConfig.DisplayLsraStats() == 2) { m_pLinearScan->dumpLsraStatsCsv(jitstdout); } #endif // TRACK_LSRA_STATS // We're done -- set the active phase to the last phase // (which isn't really a phase) mostRecentlyActivePhase = PHASE_POST_EMIT; #ifdef FEATURE_JIT_METHOD_PERF if (pCompJitTimer) { #if MEASURE_CLRAPI_CALLS EndPhase(PHASE_CLR_API); #else EndPhase(PHASE_POST_EMIT); #endif pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, true); } #endif // Generate PatchpointInfo generatePatchpointInfo(); RecordStateAtEndOfCompilation(); #ifdef FEATURE_TRACELOGGING compJitTelemetry.NotifyEndOfCompilation(); #endif #if defined(DEBUG) ++Compiler::jitTotalMethodCompiled; #endif // defined(DEBUG) compFunctionTraceEnd(*methodCodePtr, *methodCodeSize, false); JITDUMP("Method code size: %d\n", (unsigned)(*methodCodeSize)); #if FUNC_INFO_LOGGING if (compJitFuncInfoFile != nullptr) { assert(!compIsForInlining()); #ifdef DEBUG // We only have access to info.compFullName in DEBUG builds. fprintf(compJitFuncInfoFile, "%s\n", info.compFullName); #elif FEATURE_SIMD fprintf(compJitFuncInfoFile, " %s\n", eeGetMethodFullName(info.compMethodHnd)); #endif fprintf(compJitFuncInfoFile, ""); // in our logic this causes a flush } #endif // FUNC_INFO_LOGGING } #if FEATURE_LOOP_ALIGN //------------------------------------------------------------------------ // placeLoopAlignInstructions: Iterate over all the blocks and determine // the best position to place the 'align' instruction. Inserting 'align' // instructions after an unconditional branch is preferred over inserting // in the block before the loop. In case there are multiple blocks // having 'jmp', the one that has lower weight is preferred. // If the block having 'jmp' is hotter than the block before the loop, // the align will still be placed after 'jmp' because the processor should // be smart enough to not fetch extra instruction beyond jmp. // void Compiler::placeLoopAlignInstructions() { if (loopAlignCandidates == 0) { return; } int loopsToProcess = loopAlignCandidates; JITDUMP("Inside placeLoopAlignInstructions for %d loops.\n", loopAlignCandidates); // Add align only if there were any loops that needed alignment weight_t minBlockSoFar = BB_MAX_WEIGHT; BasicBlock* bbHavingAlign = nullptr; BasicBlock::loopNumber currentAlignedLoopNum = BasicBlock::NOT_IN_LOOP; if ((fgFirstBB != nullptr) && fgFirstBB->isLoopAlign()) { // Adding align instruction in prolog is not supported // hence just remove that loop from our list. loopsToProcess--; } for (BasicBlock* const block : Blocks()) { if (currentAlignedLoopNum != BasicBlock::NOT_IN_LOOP) { // We've been processing blocks within an aligned loop. Are we out of that loop now? 
if (currentAlignedLoopNum != block->bbNatLoopNum) { currentAlignedLoopNum = BasicBlock::NOT_IN_LOOP; } } // If there is a unconditional jump (which is not part of callf/always pair) if (opts.compJitHideAlignBehindJmp && (block->bbJumpKind == BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail()) { // Track the lower weight blocks if (block->bbWeight < minBlockSoFar) { if (currentAlignedLoopNum == BasicBlock::NOT_IN_LOOP) { // Ok to insert align instruction in this block because it is not part of any aligned loop. minBlockSoFar = block->bbWeight; bbHavingAlign = block; JITDUMP(FMT_BB ", bbWeight=" FMT_WT " ends with unconditional 'jmp' \n", block->bbNum, block->bbWeight); } } } if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign())) { // If jmp was not found, then block before the loop start is where align instruction will be added. if (bbHavingAlign == nullptr) { bbHavingAlign = block; JITDUMP("Marking " FMT_BB " before the loop with BBF_HAS_ALIGN for loop at " FMT_BB "\n", block->bbNum, block->bbNext->bbNum); } else { JITDUMP("Marking " FMT_BB " that ends with unconditional jump with BBF_HAS_ALIGN for loop at " FMT_BB "\n", bbHavingAlign->bbNum, block->bbNext->bbNum); } bbHavingAlign->bbFlags |= BBF_HAS_ALIGN; minBlockSoFar = BB_MAX_WEIGHT; bbHavingAlign = nullptr; currentAlignedLoopNum = block->bbNext->bbNatLoopNum; if (--loopsToProcess == 0) { break; } } } assert(loopsToProcess == 0); } #endif //------------------------------------------------------------------------ // generatePatchpointInfo: allocate and fill in patchpoint info data, // and report it to the VM // void Compiler::generatePatchpointInfo() { if (!doesMethodHavePatchpoints() && !doesMethodHavePartialCompilationPatchpoints()) { // Nothing to report return; } // Patchpoints are only found in Tier0 code, which is unoptimized, and so // should always have frame pointer. assert(codeGen->isFramePointerUsed()); // Allocate patchpoint info storage from runtime, and fill in initial bits of data. const unsigned patchpointInfoSize = PatchpointInfo::ComputeSize(info.compLocalsCount); PatchpointInfo* const patchpointInfo = (PatchpointInfo*)info.compCompHnd->allocateArray(patchpointInfoSize); // Patchpoint offsets always refer to "virtual frame offsets". // // For x64 this falls out because Tier0 frames are always FP frames, and so the FP-relative // offset is what we want. // // For arm64, if the frame pointer is not at the top of the frame, we need to adjust the // offset. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // We add +TARGET_POINTER_SIZE here is to account for the slot that Jit_Patchpoint // creates when it simulates calling the OSR method (the "pseudo return address" slot). // This is effectively a new slot at the bottom of the Tier0 frame. // const int totalFrameSize = codeGen->genTotalFrameSize() + TARGET_POINTER_SIZE; const int offsetAdjust = 0; #elif defined(TARGET_ARM64) // SP is not manipulated by calls so no frame size adjustment needed. // Local Offsets may need adjusting, if FP is at bottom of frame. // const int totalFrameSize = codeGen->genTotalFrameSize(); const int offsetAdjust = codeGen->genSPtoFPdelta() - totalFrameSize; #else NYI("patchpoint info generation"); const int offsetAdjust = 0; const int totalFrameSize = 0; #endif patchpointInfo->Initialize(info.compLocalsCount, totalFrameSize); JITDUMP("--OSR--- Total Frame Size %d, local offset adjust is %d\n", patchpointInfo->TotalFrameSize(), offsetAdjust); // We record offsets for all the "locals" here. 
Could restrict // this to just the IL locals with some extra logic, and save a bit of space, // but would need to adjust all consumers, too. for (unsigned lclNum = 0; lclNum < info.compLocalsCount; lclNum++) { LclVarDsc* const varDsc = lvaGetDesc(lclNum); // We expect all these to have stack homes, and be FP relative assert(varDsc->lvOnFrame); assert(varDsc->lvFramePointerBased); // Record FramePtr relative offset (no localloc yet) patchpointInfo->SetOffset(lclNum, varDsc->GetStackOffset() + offsetAdjust); // Note if IL stream contained an address-of that potentially leads to exposure. // This bit of IL may be skipped by OSR partial importation. if (varDsc->lvHasLdAddrOp) { patchpointInfo->SetIsExposed(lclNum); } JITDUMP("--OSR-- V%02u is at virtual offset %d%s\n", lclNum, patchpointInfo->Offset(lclNum), patchpointInfo->IsExposed(lclNum) ? " (exposed)" : ""); } // Special offsets // if (lvaReportParamTypeArg()) { const int offset = lvaCachedGenericContextArgOffset(); patchpointInfo->SetGenericContextArgOffset(offset + offsetAdjust); JITDUMP("--OSR-- cached generic context virtual offset is %d\n", patchpointInfo->GenericContextArgOffset()); } if (lvaKeepAliveAndReportThis()) { const int offset = lvaCachedGenericContextArgOffset(); patchpointInfo->SetKeptAliveThisOffset(offset + offsetAdjust); JITDUMP("--OSR-- kept-alive this virtual offset is %d\n", patchpointInfo->KeptAliveThisOffset()); } if (compGSReorderStackLayout) { assert(lvaGSSecurityCookie != BAD_VAR_NUM); LclVarDsc* const varDsc = lvaGetDesc(lvaGSSecurityCookie); patchpointInfo->SetSecurityCookieOffset(varDsc->GetStackOffset() + offsetAdjust); JITDUMP("--OSR-- security cookie V%02u virtual offset is %d\n", lvaGSSecurityCookie, patchpointInfo->SecurityCookieOffset()); } if (lvaMonAcquired != BAD_VAR_NUM) { LclVarDsc* const varDsc = lvaGetDesc(lvaMonAcquired); patchpointInfo->SetMonitorAcquiredOffset(varDsc->GetStackOffset() + offsetAdjust); JITDUMP("--OSR-- monitor acquired V%02u virtual offset is %d\n", lvaMonAcquired, patchpointInfo->MonitorAcquiredOffset()); } #if defined(TARGET_AMD64) // Record callee save registers. // Currently only needed for x64. // regMaskTP rsPushRegs = codeGen->regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; rsPushRegs |= RBM_FPBASE; patchpointInfo->SetCalleeSaveRegisters((uint64_t)rsPushRegs); JITDUMP("--OSR-- Tier0 callee saves: "); JITDUMPEXEC(dspRegMask((regMaskTP)patchpointInfo->CalleeSaveRegisters())); JITDUMP("\n"); #endif // Register this with the runtime. info.compCompHnd->setPatchpointInfo(patchpointInfo); } //------------------------------------------------------------------------ // ResetOptAnnotations: Clear annotations produced during global optimizations. // // Notes: // The intent of this method is to clear any information typically assumed // to be set only once; it is used between iterations when JitOptRepeat is // in effect. void Compiler::ResetOptAnnotations() { assert(opts.optRepeat); assert(JitConfig.JitOptRepeatCount() > 0); fgResetForSsa(); vnStore = nullptr; m_opAsgnVarDefSsaNums = nullptr; m_blockToEHPreds = nullptr; fgSsaPassesCompleted = 0; fgVNPassesCompleted = 0; for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { for (GenTree* const tree : stmt->TreeList()) { tree->ClearVN(); tree->ClearAssertion(); tree->gtCSEnum = NO_CSE; } } } } //------------------------------------------------------------------------ // RecomputeLoopInfo: Recompute loop annotations between opt-repeat iterations. 
// // Notes: // The intent of this method is to update loop structure annotations, and those // they depend on; these annotations may have become stale during optimization, // and need to be up-to-date before running another iteration of optimizations. // void Compiler::RecomputeLoopInfo() { assert(opts.optRepeat); assert(JitConfig.JitOptRepeatCount() > 0); // Recompute reachability sets, dominators, and loops. optResetLoopInfo(); fgDomsComputed = false; fgComputeReachability(); optSetBlockWeights(); // Rebuild the loop tree annotations themselves optFindLoops(); } /*****************************************************************************/ void Compiler::ProcessShutdownWork(ICorStaticInfo* statInfo) { } /*****************************************************************************/ #ifdef DEBUG void* forceFrameJIT; // used to force to frame &useful for fastchecked debugging bool Compiler::skipMethod() { static ConfigMethodRange fJitRange; fJitRange.EnsureInit(JitConfig.JitRange()); assert(!fJitRange.Error()); // Normally JitConfig.JitRange() is null, we don't want to skip // jitting any methods. // // So, the logic below relies on the fact that a null range string // passed to ConfigMethodRange represents the set of all methods. if (!fJitRange.Contains(info.compMethodHash())) { return true; } if (JitConfig.JitExclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } if (!JitConfig.JitInclude().isEmpty() && !JitConfig.JitInclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } return false; } #endif /*****************************************************************************/ int Compiler::compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { // compInit should have set these already. noway_assert(info.compMethodInfo != nullptr); noway_assert(info.compCompHnd != nullptr); noway_assert(info.compMethodHnd != nullptr); #ifdef FEATURE_JIT_METHOD_PERF static bool checkedForJitTimeLog = false; pCompJitTimer = nullptr; if (!checkedForJitTimeLog) { // Call into VM to get the config strings. FEATURE_JIT_METHOD_PERF is enabled for // retail builds. Do not call the regular Config helper here as it would pull // in a copy of the config parser into the clrjit.dll. InterlockedCompareExchangeT(&Compiler::compJitTimeLogFilename, (LPCWSTR)info.compCompHnd->getJitTimeLogFilename(), NULL); // At a process or module boundary clear the file and start afresh. JitTimer::PrintCsvHeader(); checkedForJitTimeLog = true; } if ((Compiler::compJitTimeLogFilename != nullptr) || (JitTimeLogCsv() != nullptr)) { pCompJitTimer = JitTimer::Create(this, info.compMethodInfo->ILCodeSize); } #endif // FEATURE_JIT_METHOD_PERF #ifdef DEBUG Compiler* me = this; forceFrameJIT = (void*)&me; // let us see the this pointer in fastchecked build // set this early so we can use it without relying on random memory values verbose = compIsForInlining() ? 
impInlineInfo->InlinerCompiler->verbose : false; #endif #if FUNC_INFO_LOGGING LPCWSTR tmpJitFuncInfoFilename = JitConfig.JitFuncInfoFile(); if (tmpJitFuncInfoFilename != nullptr) { LPCWSTR oldFuncInfoFileName = InterlockedCompareExchangeT(&compJitFuncInfoFilename, tmpJitFuncInfoFilename, NULL); if (oldFuncInfoFileName == nullptr) { assert(compJitFuncInfoFile == nullptr); compJitFuncInfoFile = _wfopen(compJitFuncInfoFilename, W("a")); if (compJitFuncInfoFile == nullptr) { #if defined(DEBUG) && !defined(HOST_UNIX) // no 'perror' in the PAL perror("Failed to open JitFuncInfoLogFile"); #endif // defined(DEBUG) && !defined(HOST_UNIX) } } } #endif // FUNC_INFO_LOGGING // if (s_compMethodsCount==0) setvbuf(jitstdout, NULL, _IONBF, 0); if (compIsForInlining()) { compileFlags->Clear(JitFlags::JIT_FLAG_OSR); info.compILEntry = 0; info.compPatchpointInfo = nullptr; } else if (compileFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { // Fetch OSR info from the runtime info.compPatchpointInfo = info.compCompHnd->getOSRInfo(&info.compILEntry); assert(info.compPatchpointInfo != nullptr); } #if defined(TARGET_ARM64) compFrameInfo = {0}; #endif virtualStubParamInfo = new (this, CMK_Unknown) VirtualStubParamInfo(IsTargetAbi(CORINFO_CORERT_ABI)); // compMatchedVM is set to true if both CPU/ABI and OS are matching the execution engine requirements // // Do we have a matched VM? Or are we "abusing" the VM to help us do JIT work (such as using an x86 native VM // with an ARM-targeting "altjit"). // Match CPU/ABI for compMatchedVM info.compMatchedVM = IMAGE_FILE_MACHINE_TARGET == info.compCompHnd->getExpectedTargetArchitecture(); // Match OS for compMatchedVM CORINFO_EE_INFO* eeInfo = eeGetEEInfo(); #ifdef TARGET_OS_RUNTIMEDETERMINED noway_assert(TargetOS::OSSettingConfigured); #endif if (TargetOS::IsMacOS) { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_MACOS); } else if (TargetOS::IsUnix) { if (TargetArchitecture::IsX64) { // MacOS x64 uses the Unix jit variant in crossgen2, not a special jit info.compMatchedVM = info.compMatchedVM && ((eeInfo->osType == CORINFO_UNIX) || (eeInfo->osType == CORINFO_MACOS)); } else { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_UNIX); } } else if (TargetOS::IsWindows) { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_WINNT); } // If we are not compiling for a matched VM, then we are getting JIT flags that don't match our target // architecture. The two main examples here are an ARM targeting altjit hosted on x86 and an ARM64 // targeting altjit hosted on x64. (Though with cross-bitness work, the host doesn't necessarily need // to be of the same bitness.) In these cases, we need to fix up the JIT flags to be appropriate for // the target, as the VM's expected target may overlap bit flags with different meaning to our target. // Note that it might be better to do this immediately when setting the JIT flags in CILJit::compileMethod() // (when JitFlags::SetFromFlags() is called), but this is close enough. (To move this logic to // CILJit::compileMethod() would require moving the info.compMatchedVM computation there as well.) if (!info.compMatchedVM) { #if defined(TARGET_ARM) // Currently nothing needs to be done. There are no ARM flags that conflict with other flags. #endif // defined(TARGET_ARM) #if defined(TARGET_ARM64) // The x86/x64 architecture capabilities flags overlap with the ARM64 ones. Set a reasonable architecture // target default. 
Currently this is disabling all ARM64 architecture features except FP and SIMD, but this // should be altered to possibly enable all of them, when they are known to all work. CORINFO_InstructionSetFlags defaultArm64Flags; defaultArm64Flags.AddInstructionSet(InstructionSet_ArmBase); defaultArm64Flags.AddInstructionSet(InstructionSet_AdvSimd); defaultArm64Flags.Set64BitInstructionSetVariants(); compileFlags->SetInstructionSetFlags(defaultArm64Flags); #endif // defined(TARGET_ARM64) } compMaxUncheckedOffsetForNullObject = eeGetEEInfo()->maxUncheckedOffsetForNullObject; // Set the context for token lookup. if (compIsForInlining()) { impTokenLookupContextHandle = impInlineInfo->tokenLookupContextHandle; assert(impInlineInfo->inlineCandidateInfo->clsHandle == info.compCompHnd->getMethodClass(info.compMethodHnd)); info.compClassHnd = impInlineInfo->inlineCandidateInfo->clsHandle; assert(impInlineInfo->inlineCandidateInfo->clsAttr == info.compCompHnd->getClassAttribs(info.compClassHnd)); // printf("%x != %x\n", impInlineInfo->inlineCandidateInfo->clsAttr, // info.compCompHnd->getClassAttribs(info.compClassHnd)); info.compClassAttr = impInlineInfo->inlineCandidateInfo->clsAttr; } else { impTokenLookupContextHandle = METHOD_BEING_COMPILED_CONTEXT(); info.compClassHnd = info.compCompHnd->getMethodClass(info.compMethodHnd); info.compClassAttr = info.compCompHnd->getClassAttribs(info.compClassHnd); } #ifdef DEBUG if (JitConfig.EnableExtraSuperPmiQueries()) { // This call to getClassModule/getModuleAssembly/getAssemblyName fails in crossgen2 due to these // APIs being unimplemented. So disable this extra info for pre-jit mode. See // https://github.com/dotnet/runtime/issues/48888. // // Ditto for some of the class name queries for generic params. // if (!compileFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // Get the assembly name, to aid finding any particular SuperPMI method context function (void)info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); // Fetch class names for the method's generic parameters. // CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(info.compMethodHnd, &sig, nullptr); const unsigned classInst = sig.sigInst.classInstCount; if (classInst > 0) { for (unsigned i = 0; i < classInst; i++) { eeGetClassName(sig.sigInst.classInst[i]); } } const unsigned methodInst = sig.sigInst.methInstCount; if (methodInst > 0) { for (unsigned i = 0; i < methodInst; i++) { eeGetClassName(sig.sigInst.methInst[i]); } } } } #endif // DEBUG info.compProfilerCallback = false; // Assume false until we are told to hook this method. #ifdef DEBUG if (!compIsForInlining()) { JitTls::GetLogEnv()->setCompiler(this); } // Have we been told to be more selective in our Jitting? 
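    // skipMethod() (defined above) consults the JitRange/JitExclude/JitInclude configs; a skipped
    // inline candidate is reported via CALLEE_MARKED_AS_SKIPPED rather than silently dropped.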
if (skipMethod()) { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_MARKED_AS_SKIPPED); } return CORJIT_SKIPPED; } #endif // DEBUG // Set this before the first 'BADCODE' // Skip verification where possible assert(compileFlags->IsSet(JitFlags::JIT_FLAG_SKIP_VERIFICATION)); /* Setup an error trap */ struct Param { Compiler* pThis; CORINFO_MODULE_HANDLE classPtr; COMP_HANDLE compHnd; CORINFO_METHOD_INFO* methodInfo; void** methodCodePtr; uint32_t* methodCodeSize; JitFlags* compileFlags; int result; } param; param.pThis = this; param.classPtr = classPtr; param.compHnd = info.compCompHnd; param.methodInfo = info.compMethodInfo; param.methodCodePtr = methodCodePtr; param.methodCodeSize = methodCodeSize; param.compileFlags = compileFlags; param.result = CORJIT_INTERNALERROR; setErrorTrap(info.compCompHnd, Param*, pParam, &param) // ERROR TRAP: Start normal block { pParam->result = pParam->pThis->compCompileHelper(pParam->classPtr, pParam->compHnd, pParam->methodInfo, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); } finallyErrorTrap() // ERROR TRAP: The following block handles errors { /* Cleanup */ if (compIsForInlining()) { goto DoneCleanUp; } /* Tell the emitter that we're done with this function */ GetEmitter()->emitEndCG(); DoneCleanUp: compDone(); } endErrorTrap() // ERROR TRAP: End return param.result; } #if defined(DEBUG) || defined(INLINE_DATA) //------------------------------------------------------------------------ // compMethodHash: get hash code for currently jitted method // // Returns: // Hash based on method's full name // unsigned Compiler::Info::compMethodHash() const { if (compMethodHashPrivate == 0) { // compMethodHashPrivate = compCompHnd->getMethodHash(compMethodHnd); assert(compFullName != nullptr); assert(*compFullName != 0); COUNT_T hash = HashStringA(compFullName); // Use compFullName to generate the hash, as it contains the signature // and return type compMethodHashPrivate = hash; } return compMethodHashPrivate; } //------------------------------------------------------------------------ // compMethodHash: get hash code for specified method // // Arguments: // methodHnd - method of interest // // Returns: // Hash based on method's full name // unsigned Compiler::compMethodHash(CORINFO_METHOD_HANDLE methodHnd) { // If this is the root method, delegate to the caching version // if (methodHnd == info.compMethodHnd) { return info.compMethodHash(); } // Else compute from scratch. Might consider caching this too. 
//
unsigned methodHash = 0;
const char* calleeName = eeGetMethodFullName(methodHnd);

if (calleeName != nullptr)
{
    methodHash = HashStringA(calleeName);
}
else
{
    methodHash = info.compCompHnd->getMethodHash(methodHnd);
}

return methodHash;
}

#endif // defined(DEBUG) || defined(INLINE_DATA)

void Compiler::compCompileFinish()
{
#if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS
    genMethodCnt++;
#endif

#if MEASURE_MEM_ALLOC
    {
        compArenaAllocator->finishMemStats();
        memAllocHist.record((unsigned)((compArenaAllocator->getTotalBytesAllocated() + 1023) / 1024));
        memUsedHist.record((unsigned)((compArenaAllocator->getTotalBytesUsed() + 1023) / 1024));
    }

#ifdef DEBUG
    if (s_dspMemStats || verbose)
    {
        printf("\nAllocations for %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash());
        compArenaAllocator->dumpMemStats(jitstdout);
    }
#endif // DEBUG
#endif // MEASURE_MEM_ALLOC

#if LOOP_HOIST_STATS
    AddLoopHoistStats();
#endif // LOOP_HOIST_STATS

#if MEASURE_NODE_SIZE
    genTreeNcntHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeCnt));
    genTreeNsizHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeSize));
#endif

#if defined(DEBUG)
    // Small methods should fit in ArenaAllocator::getDefaultPageSize(), or else
    // we should bump up ArenaAllocator::getDefaultPageSize()

    if ((info.compILCodeSize <= 32) &&     // Is it a reasonably small method?
        (info.compNativeCodeSize < 512) && // Some trivial methods generate huge native code. e.g. pushing a single huge
                                           // struct
        (impInlinedCodeSize <= 128) &&     // Is the inlining reasonably bounded?
                                           // Small methods cannot meaningfully have a big number of locals
                                           // or arguments. We always track arguments at the start of
                                           // the prolog which requires memory
        (info.compLocalsCount <= 32) && (!opts.MinOpts()) && // We may have too many local variables, etc
        (getJitStressLevel() == 0) &&                        // We need extra memory for stress
        !opts.optRepeat &&                                   // We need extra memory to repeat opts
        !compArenaAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for
                                                      // DirectAlloc
        // Factor of 2x is because data-structures are bigger under DEBUG
        (compArenaAllocator->getTotalBytesAllocated() > (2 * ArenaAllocator::getDefaultPageSize())) &&
        // RyuJIT backend needs memory tuning! TODO-Cleanup: remove this case when memory tuning is complete.
        (compArenaAllocator->getTotalBytesAllocated() > (10 * ArenaAllocator::getDefaultPageSize())) &&
        !verbose) // We allocate lots of memory to convert sets to strings for JitDump
    {
        genSmallMethodsNeedingExtraMemoryCnt++;

        // Less than 1% of all methods should run into this.
        // We cannot be more strict as there are always degenerate cases where we
        // would need extra memory (like huge structs as locals - see lvaSetStruct()).
assert((genMethodCnt < 500) || (genSmallMethodsNeedingExtraMemoryCnt < (genMethodCnt / 100))); } #endif // DEBUG #if defined(DEBUG) || defined(INLINE_DATA) m_inlineStrategy->DumpData(); if (JitConfig.JitInlineDumpXmlFile() != nullptr) { FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a")); if (file != nullptr) { m_inlineStrategy->DumpXml(file); fclose(file); } else { m_inlineStrategy->DumpXml(); } } else { m_inlineStrategy->DumpXml(); } #endif #ifdef DEBUG if (opts.dspOrder) { // mdMethodDef __stdcall CEEInfo::getMethodDefFromMethod(CORINFO_METHOD_HANDLE hMethod) mdMethodDef currentMethodToken = info.compCompHnd->getMethodDefFromMethod(info.compMethodHnd); static bool headerPrinted = false; if (!headerPrinted) { // clang-format off headerPrinted = true; printf(" | Profiled | Method | Method has | calls | Num |LclV |AProp| CSE | Perf |bytes | %3s codesize| \n", Target::g_tgtCPUName); printf(" mdToken | CNT | RGN | Hash | EH | FRM | LOOP | NRM | IND | BBs | Cnt | Cnt | Cnt | Score | IL | HOT | CLD | method name \n"); printf("---------+------+------+----------+----+-----+------+-----+-----+-----+-----+-----+-----+---------+------+-------+-----+\n"); // 06001234 | 1234 | HOT | 0f1e2d3c | EH | ebp | LOOP | 15 | 6 | 12 | 17 | 12 | 8 | 1234.56 | 145 | 1234 | 123 | System.Example(int) // clang-format on } printf("%08X | ", currentMethodToken); if (fgHaveProfileData()) { if (fgCalledCount < 1000) { printf("%4.0f | ", fgCalledCount); } else if (fgCalledCount < 1000000) { printf("%3.0fK | ", fgCalledCount / 1000); } else { printf("%3.0fM | ", fgCalledCount / 1000000); } } else { printf(" | "); } CorInfoRegionKind regionKind = info.compMethodInfo->regionKind; if (opts.altJit) { printf("ALT | "); } else if (regionKind == CORINFO_REGION_NONE) { printf(" | "); } else if (regionKind == CORINFO_REGION_HOT) { printf(" HOT | "); } else if (regionKind == CORINFO_REGION_COLD) { printf("COLD | "); } else if (regionKind == CORINFO_REGION_JIT) { printf(" JIT | "); } else { printf("UNKN | "); } printf("%08x | ", info.compMethodHash()); if (compHndBBtabCount > 0) { printf("EH | "); } else { printf(" | "); } if (rpFrameType == FT_EBP_FRAME) { printf("%3s | ", STR_FPBASE); } else if (rpFrameType == FT_ESP_FRAME) { printf("%3s | ", STR_SPBASE); } #if DOUBLE_ALIGN else if (rpFrameType == FT_DOUBLE_ALIGN_FRAME) { printf("dbl | "); } #endif else // (rpFrameType == FT_NOT_SET) { printf("??? 
| "); } if (fgHasLoops) { printf("LOOP |"); } else { printf(" |"); } printf(" %3d |", optCallCount); printf(" %3d |", optIndirectCallCount); printf(" %3d |", fgBBcountAtCodegen); printf(" %3d |", lvaCount); if (opts.MinOpts()) { printf(" MinOpts |"); } else { printf(" %3d |", optAssertionCount); printf(" %3d |", optCSEcount); } if (info.compPerfScore < 9999.995) { printf(" %7.2f |", info.compPerfScore); } else { printf(" %7.0f |", info.compPerfScore); } printf(" %4d |", info.compMethodInfo->ILCodeSize); printf(" %5d |", info.compTotalHotCodeSize); printf(" %3d |", info.compTotalColdCodeSize); printf(" %s\n", eeGetMethodFullName(info.compMethodHnd)); printf(""); // in our logic this causes a flush } if (verbose) { printf("****** DONE compiling %s\n", info.compFullName); printf(""); // in our logic this causes a flush } #if TRACK_ENREG_STATS for (unsigned i = 0; i < lvaCount; ++i) { const LclVarDsc* varDsc = lvaGetDesc(i); if (varDsc->lvRefCnt() != 0) { s_enregisterStats.RecordLocal(varDsc); } } #endif // TRACK_ENREG_STATS // Only call _DbgBreakCheck when we are jitting, not when we are ngen-ing // For ngen the int3 or breakpoint instruction will be right at the // start of the ngen method and we will stop when we execute it. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (compJitHaltMethod()) { #if !defined(HOST_UNIX) // TODO-UNIX: re-enable this when we have an OS that supports a pop-up dialog // Don't do an assert, but just put up the dialog box so we get just-in-time debugger // launching. When you hit 'retry' it will continue and naturally stop at the INT 3 // that the JIT put in the code _DbgBreakCheck(__FILE__, __LINE__, "JitHalt"); #endif } } #endif // DEBUG } #ifdef PSEUDORANDOM_NOP_INSERTION // this is zlib adler32 checksum. source came from windows base #define BASE 65521L // largest prime smaller than 65536 #define NMAX 5552 // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 #define DO1(buf, i) \ { \ s1 += buf[i]; \ s2 += s1; \ } #define DO2(buf, i) \ DO1(buf, i); \ DO1(buf, i + 1); #define DO4(buf, i) \ DO2(buf, i); \ DO2(buf, i + 2); #define DO8(buf, i) \ DO4(buf, i); \ DO4(buf, i + 4); #define DO16(buf) \ DO8(buf, 0); \ DO8(buf, 8); unsigned adler32(unsigned adler, char* buf, unsigned int len) { unsigned int s1 = adler & 0xffff; unsigned int s2 = (adler >> 16) & 0xffff; int k; if (buf == NULL) return 1L; while (len > 0) { k = len < NMAX ? 
len : NMAX; len -= k; while (k >= 16) { DO16(buf); buf += 16; k -= 16; } if (k != 0) do { s1 += *buf++; s2 += s1; } while (--k); s1 %= BASE; s2 %= BASE; } return (s2 << 16) | s1; } #endif unsigned getMethodBodyChecksum(_In_z_ char* code, int size) { #ifdef PSEUDORANDOM_NOP_INSERTION return adler32(0, code, size); #else return 0; #endif } int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { CORINFO_METHOD_HANDLE methodHnd = info.compMethodHnd; info.compCode = methodInfo->ILCode; info.compILCodeSize = methodInfo->ILCodeSize; info.compILImportSize = 0; if (info.compILCodeSize == 0) { BADCODE("code size is zero"); } if (compIsForInlining()) { #ifdef DEBUG unsigned methAttr_Old = impInlineInfo->inlineCandidateInfo->methAttr; unsigned methAttr_New = info.compCompHnd->getMethodAttribs(info.compMethodHnd); unsigned flagsToIgnore = CORINFO_FLG_DONT_INLINE | CORINFO_FLG_FORCEINLINE; assert((methAttr_Old & (~flagsToIgnore)) == (methAttr_New & (~flagsToIgnore))); #endif info.compFlags = impInlineInfo->inlineCandidateInfo->methAttr; compInlineContext = impInlineInfo->inlineContext; } else { info.compFlags = info.compCompHnd->getMethodAttribs(info.compMethodHnd); #ifdef PSEUDORANDOM_NOP_INSERTION info.compChecksum = getMethodBodyChecksum((char*)methodInfo->ILCode, methodInfo->ILCodeSize); #endif compInlineContext = m_inlineStrategy->GetRootContext(); } compSwitchedToOptimized = false; compSwitchedToMinOpts = false; // compInitOptions will set the correct verbose flag. compInitOptions(compileFlags); if (!compIsForInlining() && !opts.altJit && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { // We're an altjit, but the COMPlus_AltJit configuration did not say to compile this method, // so skip it. return CORJIT_SKIPPED; } #ifdef DEBUG if (verbose) { printf("IL to import:\n"); dumpILRange(info.compCode, info.compILCodeSize); } #endif // Check for COMPlus_AggressiveInlining if (JitConfig.JitAggressiveInlining()) { compDoAggressiveInlining = true; } if (compDoAggressiveInlining) { info.compFlags |= CORINFO_FLG_FORCEINLINE; } #ifdef DEBUG // Check for ForceInline stress. 
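// When the STRESS_FORCE_INLINE stress mode is active we mark the method as force-inline,
// which gives the inliner's force-inline paths extra coverage under stress runs.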
if (compStressCompile(STRESS_FORCE_INLINE, 0)) { info.compFlags |= CORINFO_FLG_FORCEINLINE; } if (compIsForInlining()) { JITLOG((LL_INFO100000, "\nINLINER impTokenLookupContextHandle for %s is 0x%p.\n", eeGetMethodFullName(info.compMethodHnd), dspPtr(impTokenLookupContextHandle))); } #endif // DEBUG impCanReimport = compStressCompile(STRESS_CHK_REIMPORT, 15); /* Initialize set a bunch of global values */ info.compScopeHnd = classPtr; info.compXcptnsCount = methodInfo->EHcount; info.compMaxStack = methodInfo->maxStack; compHndBBtab = nullptr; compHndBBtabCount = 0; compHndBBtabAllocCount = 0; info.compNativeCodeSize = 0; info.compTotalHotCodeSize = 0; info.compTotalColdCodeSize = 0; info.compClassProbeCount = 0; compHasBackwardJump = false; compHasBackwardJumpInHandler = false; #ifdef DEBUG compCurBB = nullptr; lvaTable = nullptr; // Reset node and block ID counter compGenTreeID = 0; compStatementID = 0; compBasicBlockID = 0; #endif /* Initialize emitter */ if (!compIsForInlining()) { codeGen->GetEmitter()->emitBegCG(this, compHnd); } info.compIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; info.compPublishStubParam = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PUBLISH_SECRET_PARAM); info.compHasNextCallRetAddr = false; if (opts.IsReversePInvoke()) { bool unused; info.compCallConv = info.compCompHnd->getUnmanagedCallConv(methodInfo->ftn, nullptr, &unused); info.compArgOrder = Target::g_tgtUnmanagedArgOrder; } else { info.compCallConv = CorInfoCallConvExtension::Managed; info.compArgOrder = Target::g_tgtArgOrder; } info.compIsVarArgs = false; switch (methodInfo->args.getCallConv()) { case CORINFO_CALLCONV_NATIVEVARARG: case CORINFO_CALLCONV_VARARG: info.compIsVarArgs = true; break; default: break; } info.compRetNativeType = info.compRetType = JITtype2varType(methodInfo->args.retType); info.compUnmanagedCallCountWithGCTransition = 0; info.compLvFrameListRoot = BAD_VAR_NUM; info.compInitMem = ((methodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0); /* Allocate the local variable table */ lvaInitTypeRef(); compInitDebuggingInfo(); // If are an altjit and have patchpoint info, we might need to tweak the frame size // so it's plausible for the altjit architecture. // if (!info.compMatchedVM && compileFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { assert(info.compLocalsCount == info.compPatchpointInfo->NumberOfLocals()); const int totalFrameSize = info.compPatchpointInfo->TotalFrameSize(); int frameSizeUpdate = 0; #if defined(TARGET_AMD64) if ((totalFrameSize % 16) != 8) { frameSizeUpdate = 8; } #elif defined(TARGET_ARM64) if ((totalFrameSize % 16) != 0) { frameSizeUpdate = 8; } #endif if (frameSizeUpdate != 0) { JITDUMP("Mismatched altjit + OSR -- updating tier0 frame size from %d to %d\n", totalFrameSize, totalFrameSize + frameSizeUpdate); // Allocate a local copy with altered frame size. // const unsigned patchpointInfoSize = PatchpointInfo::ComputeSize(info.compLocalsCount); PatchpointInfo* const newInfo = (PatchpointInfo*)getAllocator(CMK_Unknown).allocate<char>(patchpointInfoSize); newInfo->Initialize(info.compLocalsCount, totalFrameSize + frameSizeUpdate); newInfo->Copy(info.compPatchpointInfo); // Swap it in place. // info.compPatchpointInfo = newInfo; } } #ifdef DEBUG if (compIsForInlining()) { compBasicBlockID = impInlineInfo->InlinerCompiler->compBasicBlockID; } #endif const bool forceInline = !!(info.compFlags & CORINFO_FLG_FORCEINLINE); if (!compIsForInlining() && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // We're prejitting the root method. 
We also will analyze it as // a potential inline candidate. InlineResult prejitResult(this, methodHnd, "prejit"); // Profile data allows us to avoid early "too many IL bytes" outs. prejitResult.NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, fgHaveSufficientProfileData()); // Do the initial inline screen. impCanInlineIL(methodHnd, methodInfo, forceInline, &prejitResult); // Temporarily install the prejitResult as the // compInlineResult so it's available to fgFindJumpTargets // and can accumulate more observations as the IL is // scanned. // // We don't pass prejitResult in as a parameter to avoid // potential aliasing confusion -- the other call to // fgFindBasicBlocks may have set up compInlineResult and // the code in fgFindJumpTargets references that data // member extensively. assert(compInlineResult == nullptr); assert(impInlineInfo == nullptr); compInlineResult = &prejitResult; // Find the basic blocks. We must do this regardless of // inlineability, since we are prejitting this method. // // This will also update the status of this method as // an inline candidate. fgFindBasicBlocks(); // Undo the temporary setup. assert(compInlineResult == &prejitResult); compInlineResult = nullptr; // If still a viable, discretionary inline, assess // profitability. if (prejitResult.IsDiscretionaryCandidate()) { prejitResult.DetermineProfitability(methodInfo); } m_inlineStrategy->NotePrejitDecision(prejitResult); // Handle the results of the inline analysis. if (prejitResult.IsFailure()) { // This method is a bad inlinee according to our // analysis. We will let the InlineResult destructor // mark it as noinline in the prejit image to save the // jit some work. // // This decision better not be context-dependent. assert(prejitResult.IsNever()); } else { // This looks like a viable inline candidate. Since // we're not actually inlining, don't report anything. prejitResult.SetReported(); } } else { // We are jitting the root method, or inlining. fgFindBasicBlocks(); // If we are doing OSR, update flow to initially reach the appropriate IL offset. // if (opts.IsOSR()) { fgFixEntryFlowForOSR(); } } // If we're inlining and the candidate is bad, bail out. if (compDonotInline()) { goto _Next; } // We may decide to optimize this method, // to avoid spending a long time stuck in Tier0 code. // if (fgCanSwitchToOptimized()) { // We only expect to be able to do this at Tier0. // assert(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)); // Normal tiering should bail us out of Tier0 tail call induced loops. // So keep these methods in Tier0 if we're gathering PGO data. // If we're not gathering PGO, then switch these to optimized to // minimize the number of tail call helper stubs we might need. // Reconsider this if/when we're able to share those stubs. // // Honor the config setting that tells the jit to // always optimize methods with loops. // // If neither of those apply, and OSR is enabled, the jit may still // decide to optimize, if there's something in the method that // OSR currently cannot handle, or we're optionally suppressing // OSR by method hash. 
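// (Note that this block only computes a non-null 'reason'; the actual transition is done
// by the fgSwitchToOptimized call at the end of this scope.)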
// const char* reason = nullptr; if (compTailPrefixSeen && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { reason = "tail.call and not BBINSTR"; } else if (compHasBackwardJump && ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0)) { reason = "loop"; } if (compHasBackwardJump && (reason == nullptr) && (JitConfig.TC_OnStackReplacement() > 0)) { const char* noPatchpointReason = nullptr; bool canEscapeViaOSR = compCanHavePatchpoints(&reason); #ifdef DEBUG if (canEscapeViaOSR) { // Optionally disable OSR by method hash. This will force any // method that might otherwise get trapped in Tier0 to be optimized. // static ConfigMethodRange JitEnableOsrRange; JitEnableOsrRange.EnsureInit(JitConfig.JitEnableOsrRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); if (!JitEnableOsrRange.Contains(hash)) { canEscapeViaOSR = false; reason = "OSR disabled by JitEnableOsrRange"; } } #endif if (canEscapeViaOSR) { JITDUMP("\nOSR enabled for this method\n"); } else { JITDUMP("\nOSR disabled for this method: %s\n", noPatchpointReason); assert(reason != nullptr); } } if (reason != nullptr) { fgSwitchToOptimized(reason); } } compSetOptimizationLevel(); #if COUNT_BASIC_BLOCKS bbCntTable.record(fgBBcount); if (fgBBcount == 1) { bbOneBBSizeTable.record(methodInfo->ILCodeSize); } #endif // COUNT_BASIC_BLOCKS #ifdef DEBUG if (verbose) { printf("Basic block list for '%s'\n", info.compFullName); fgDispBasicBlocks(); } #endif #ifdef DEBUG /* Give the function a unique number */ if (opts.disAsm || verbose) { compMethodID = ~info.compMethodHash() & 0xffff; } else { compMethodID = InterlockedIncrement(&s_compMethodsCount); } #endif if (compIsForInlining()) { compInlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS, fgBBcount); if (compInlineResult->IsFailure()) { goto _Next; } } #ifdef DEBUG if ((JitConfig.DumpJittedMethods() == 1) && !compIsForInlining()) { enum { BUFSIZE = 20 }; char osrBuffer[BUFSIZE] = {0}; if (opts.IsOSR()) { // Tiering name already includes "OSR", we just want the IL offset // sprintf_s(osrBuffer, BUFSIZE, " @0x%x", info.compILEntry); } printf("Compiling %4d %s::%s, IL size = %u, hash=0x%08x %s%s%s\n", Compiler::jitTotalMethodCompiled, info.compClassName, info.compMethodName, info.compILCodeSize, info.compMethodHash(), compGetTieringName(), osrBuffer, compGetStressMessage()); } if (compIsForInlining()) { compGenTreeID = impInlineInfo->InlinerCompiler->compGenTreeID; compStatementID = impInlineInfo->InlinerCompiler->compStatementID; } #endif compCompile(methodCodePtr, methodCodeSize, compileFlags); #ifdef DEBUG if (compIsForInlining()) { impInlineInfo->InlinerCompiler->compGenTreeID = compGenTreeID; impInlineInfo->InlinerCompiler->compStatementID = compStatementID; impInlineInfo->InlinerCompiler->compBasicBlockID = compBasicBlockID; } #endif _Next: if (compDonotInline()) { // Verify we have only one inline result in play. assert(impInlineInfo->inlineResult == compInlineResult); } if (!compIsForInlining()) { compCompileFinish(); // Did we just compile for a target architecture that the VM isn't expecting? If so, the VM // can't used the generated code (and we better be an AltJit!). if (!info.compMatchedVM) { return CORJIT_SKIPPED; } #ifdef DEBUG if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT) && JitConfig.RunAltJitCode() == 0) { return CORJIT_SKIPPED; } #endif // DEBUG } /* Success! 
*/ return CORJIT_OK; } //------------------------------------------------------------------------ // compFindLocalVarLinear: Linear search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // offs The offset value which should occur within the life of the variable. // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end or nullptr when there is no match found. // // Description: // Linear search for matching variables with their life begin and end containing // the offset. // or NULL if one couldn't be found. // // Note: // Usually called for scope count = 4. Could be called for values upto 8. // VarScopeDsc* Compiler::compFindLocalVarLinear(unsigned varNum, unsigned offs) { for (unsigned i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* dsc = &info.compVarScopes[i]; if ((dsc->vsdVarNum == varNum) && (dsc->vsdLifeBeg <= offs) && (dsc->vsdLifeEnd > offs)) { return dsc; } } return nullptr; } //------------------------------------------------------------------------ // compFindLocalVar: Search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // offs The offset value which should occur within the life of the variable. // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end. // or NULL if one couldn't be found. // // Description: // Linear search for matching variables with their life begin and end containing // the offset only when the scope count is < MAX_LINEAR_FIND_LCL_SCOPELIST, // else use the hashtable lookup. // VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned offs) { if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST) { return compFindLocalVarLinear(varNum, offs); } else { VarScopeDsc* ret = compFindLocalVar(varNum, offs, offs); assert(ret == compFindLocalVarLinear(varNum, offs)); return ret; } } //------------------------------------------------------------------------ // compFindLocalVar: Search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // lifeBeg The life begin of the variable's scope // lifeEnd The life end of the variable's scope // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end, or NULL if one couldn't be found. // // Description: // Following are the steps used: // 1. Index into the hashtable using varNum. // 2. Iterate through the linked list at index varNum to find a matching // var scope. // VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd) { assert(compVarScopeMap != nullptr); VarScopeMapInfo* info; if (compVarScopeMap->Lookup(varNum, &info)) { VarScopeListNode* list = info->head; while (list != nullptr) { if ((list->data->vsdLifeBeg <= lifeBeg) && (list->data->vsdLifeEnd > lifeEnd)) { return list->data; } list = list->next; } } return nullptr; } //------------------------------------------------------------------------- // compInitVarScopeMap: Create a scope map so it can be looked up by varNum // // Description: // Map.K => Map.V :: varNum => List(ScopeDsc) // // Create a scope map that can be indexed by varNum and can be iterated // on it's values to look for matching scope when given an offs or // lifeBeg and lifeEnd. // // Notes: // 1. 
Build the map only when we think linear search is slow, i.e., // MAX_LINEAR_FIND_LCL_SCOPELIST is large. // 2. Linked list preserves original array order. // void Compiler::compInitVarScopeMap() { if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST) { return; } assert(compVarScopeMap == nullptr); compVarScopeMap = new (getAllocator()) VarNumToScopeDscMap(getAllocator()); // 599 prime to limit huge allocations; for ex: duplicated scopes on single var. compVarScopeMap->Reallocate(min(info.compVarScopesCount, 599)); for (unsigned i = 0; i < info.compVarScopesCount; ++i) { unsigned varNum = info.compVarScopes[i].vsdVarNum; VarScopeListNode* node = VarScopeListNode::Create(&info.compVarScopes[i], getAllocator()); // Index by varNum and if the list exists append "node" to the "list". VarScopeMapInfo* info; if (compVarScopeMap->Lookup(varNum, &info)) { info->tail->next = node; info->tail = node; } // Create a new list. else { info = VarScopeMapInfo::Create(node, getAllocator()); compVarScopeMap->Set(varNum, info); } } } struct genCmpLocalVarLifeBeg { bool operator()(const VarScopeDsc* elem1, const VarScopeDsc* elem2) { return elem1->vsdLifeBeg < elem2->vsdLifeBeg; } }; struct genCmpLocalVarLifeEnd { bool operator()(const VarScopeDsc* elem1, const VarScopeDsc* elem2) { return elem1->vsdLifeEnd < elem2->vsdLifeEnd; } }; inline void Compiler::compInitScopeLists() { if (info.compVarScopesCount == 0) { compEnterScopeList = compExitScopeList = nullptr; return; } // Populate the 'compEnterScopeList' and 'compExitScopeList' lists compEnterScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; compExitScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; for (unsigned i = 0; i < info.compVarScopesCount; i++) { compEnterScopeList[i] = compExitScopeList[i] = &info.compVarScopes[i]; } jitstd::sort(compEnterScopeList, compEnterScopeList + info.compVarScopesCount, genCmpLocalVarLifeBeg()); jitstd::sort(compExitScopeList, compExitScopeList + info.compVarScopesCount, genCmpLocalVarLifeEnd()); } void Compiler::compResetScopeLists() { if (info.compVarScopesCount == 0) { return; } assert(compEnterScopeList && compExitScopeList); compNextEnterScope = compNextExitScope = 0; } VarScopeDsc* Compiler::compGetNextEnterScope(unsigned offs, bool scan) { assert(info.compVarScopesCount); assert(compEnterScopeList && compExitScopeList); if (compNextEnterScope < info.compVarScopesCount) { assert(compEnterScopeList[compNextEnterScope]); unsigned nextEnterOff = compEnterScopeList[compNextEnterScope]->vsdLifeBeg; assert(scan || (offs <= nextEnterOff)); if (!scan) { if (offs == nextEnterOff) { return compEnterScopeList[compNextEnterScope++]; } } else { if (nextEnterOff <= offs) { return compEnterScopeList[compNextEnterScope++]; } } } return nullptr; } VarScopeDsc* Compiler::compGetNextExitScope(unsigned offs, bool scan) { assert(info.compVarScopesCount); assert(compEnterScopeList && compExitScopeList); if (compNextExitScope < info.compVarScopesCount) { assert(compExitScopeList[compNextExitScope]); unsigned nextExitOffs = compExitScopeList[compNextExitScope]->vsdLifeEnd; assert(scan || (offs <= nextExitOffs)); if (!scan) { if (offs == nextExitOffs) { return compExitScopeList[compNextExitScope++]; } } else { if (nextExitOffs <= offs) { return compExitScopeList[compNextExitScope++]; } } } return nullptr; } // The function will call the callback functions for scopes with boundaries // at instrs from the current status of the scope lists to 'offset', // ordered by instrs. 
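//
// A minimal usage sketch (the callback names and offset variable here are illustrative only,
// not real members):
//
//    compResetScopeLists();
//    VARSET_TP inScope(VarSetOps::MakeEmpty(this));
//    compProcessScopesUntil(blockBegOffs, &inScope, &Compiler::MyEnterScopeFn, &Compiler::MyExitScopeFn);
//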
void Compiler::compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)) { assert(offset != BAD_IL_OFFSET); assert(inScope != nullptr); bool foundExit = false, foundEnter = true; VarScopeDsc* scope; VarScopeDsc* nextExitScope = nullptr; VarScopeDsc* nextEnterScope = nullptr; unsigned offs = offset, curEnterOffs = 0; goto START_FINDING_SCOPES; // We need to determine the scopes which are open for the current block. // This loop walks over the missing blocks between the current and the // previous block, keeping the enter and exit offsets in lockstep. do { foundExit = foundEnter = false; if (nextExitScope) { (this->*exitScopeFn)(inScope, nextExitScope); nextExitScope = nullptr; foundExit = true; } offs = nextEnterScope ? nextEnterScope->vsdLifeBeg : offset; while ((scope = compGetNextExitScope(offs, true)) != nullptr) { foundExit = true; if (!nextEnterScope || scope->vsdLifeEnd > nextEnterScope->vsdLifeBeg) { // We overshot the last found Enter scope. Save the scope for later // and find an entering scope nextExitScope = scope; break; } (this->*exitScopeFn)(inScope, scope); } if (nextEnterScope) { (this->*enterScopeFn)(inScope, nextEnterScope); curEnterOffs = nextEnterScope->vsdLifeBeg; nextEnterScope = nullptr; foundEnter = true; } offs = nextExitScope ? nextExitScope->vsdLifeEnd : offset; START_FINDING_SCOPES: while ((scope = compGetNextEnterScope(offs, true)) != nullptr) { foundEnter = true; if ((nextExitScope && scope->vsdLifeBeg >= nextExitScope->vsdLifeEnd) || (scope->vsdLifeBeg > curEnterOffs)) { // We overshot the last found exit scope. Save the scope for later // and find an exiting scope nextEnterScope = scope; break; } (this->*enterScopeFn)(inScope, scope); if (!nextExitScope) { curEnterOffs = scope->vsdLifeBeg; } } } while (foundExit || foundEnter); } #if defined(DEBUG) void Compiler::compDispScopeLists() { unsigned i; printf("Local variable scopes = %d\n", info.compVarScopesCount); if (info.compVarScopesCount) { printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n"); } printf("Sorted by enter scope:\n"); for (i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = compEnterScopeList[i]; assert(varScope); printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); if (compNextEnterScope == i) { printf(" <-- next enter scope"); } printf("\n"); } printf("Sorted by exit scope:\n"); for (i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = compExitScopeList[i]; assert(varScope); printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); if (compNextExitScope == i) { printf(" <-- next exit scope"); } printf("\n"); } } void Compiler::compDispLocalVars() { printf("info.compVarScopesCount = %d\n", info.compVarScopesCount); if (info.compVarScopesCount > 0) { printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n"); } for (unsigned i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = &info.compVarScopes[i]; printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh\n", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? 
"UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); } } #endif // DEBUG /*****************************************************************************/ #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo : public ICorJitInfo { //------------------------------------------------------------------------ // WrapICorJitInfo::makeOne: allocate an instance of WrapICorJitInfo // // Arguments: // alloc - the allocator to get memory from for the instance // compile - the compiler instance // compHndRef - the ICorJitInfo handle from the EE; the caller's // copy may be replaced with a "wrapper" instance // // Return Value: // If the config flags indicate that ICorJitInfo should be wrapped, // we return the "wrapper" instance; otherwise we return "nullptr". static WrapICorJitInfo* makeOne(ArenaAllocator* alloc, Compiler* compiler, COMP_HANDLE& compHndRef /* INOUT */) { WrapICorJitInfo* wrap = nullptr; if (JitConfig.JitEECallTimingInfo() != 0) { // It's too early to use the default allocator, so we do this // in two steps to be safe (the constructor doesn't need to do // anything except fill in the vtable pointer, so we let the // compiler do it). void* inst = alloc->allocateMemory(roundUp(sizeof(WrapICorJitInfo))); if (inst != nullptr) { // If you get a build error here due to 'WrapICorJitInfo' being // an abstract class, it's very likely that the wrapper bodies // in ICorJitInfo_API_wrapper.hpp are no longer in sync with // the EE interface; please be kind and update the header file. wrap = new (inst, jitstd::placement_t()) WrapICorJitInfo(); wrap->wrapComp = compiler; // Save the real handle and replace it with our wrapped version. wrap->wrapHnd = compHndRef; compHndRef = wrap; } } return wrap; } private: Compiler* wrapComp; COMP_HANDLE wrapHnd; // the "real thing" public: #include "ICorJitInfo_API_wrapper.hpp" }; #endif // MEASURE_CLRAPI_CALLS /*****************************************************************************/ // Compile a single method int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd, CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags, void* inlineInfoPtr) { // // A non-NULL inlineInfo means we are compiling the inlinee method. // InlineInfo* inlineInfo = (InlineInfo*)inlineInfoPtr; bool jitFallbackCompile = false; START: int result = CORJIT_INTERNALERROR; ArenaAllocator* pAlloc = nullptr; ArenaAllocator alloc; #if MEASURE_CLRAPI_CALLS WrapICorJitInfo* wrapCLR = nullptr; #endif if (inlineInfo) { // Use inliner's memory allocator when compiling the inlinee. 
pAlloc = inlineInfo->InlinerCompiler->compGetArenaAllocator(); } else { pAlloc = &alloc; } Compiler* pComp; pComp = nullptr; struct Param { Compiler* pComp; ArenaAllocator* pAlloc; bool jitFallbackCompile; CORINFO_METHOD_HANDLE methodHnd; CORINFO_MODULE_HANDLE classPtr; COMP_HANDLE compHnd; CORINFO_METHOD_INFO* methodInfo; void** methodCodePtr; uint32_t* methodCodeSize; JitFlags* compileFlags; InlineInfo* inlineInfo; #if MEASURE_CLRAPI_CALLS WrapICorJitInfo* wrapCLR; #endif int result; } param; param.pComp = nullptr; param.pAlloc = pAlloc; param.jitFallbackCompile = jitFallbackCompile; param.methodHnd = methodHnd; param.classPtr = classPtr; param.compHnd = compHnd; param.methodInfo = methodInfo; param.methodCodePtr = methodCodePtr; param.methodCodeSize = methodCodeSize; param.compileFlags = compileFlags; param.inlineInfo = inlineInfo; #if MEASURE_CLRAPI_CALLS param.wrapCLR = nullptr; #endif param.result = result; setErrorTrap(compHnd, Param*, pParamOuter, &param) { setErrorTrap(nullptr, Param*, pParam, pParamOuter) { if (pParam->inlineInfo) { // Lazily create the inlinee compiler object if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == nullptr) { pParam->inlineInfo->InlinerCompiler->InlineeCompiler = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); } // Use the inlinee compiler object pParam->pComp = pParam->inlineInfo->InlinerCompiler->InlineeCompiler; #ifdef DEBUG // memset(pParam->pComp, 0xEE, sizeof(Compiler)); #endif } else { // Allocate create the inliner compiler object pParam->pComp = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); } #if MEASURE_CLRAPI_CALLS pParam->wrapCLR = WrapICorJitInfo::makeOne(pParam->pAlloc, pParam->pComp, pParam->compHnd); #endif // push this compiler on the stack (TLS) pParam->pComp->prevCompiler = JitTls::GetCompiler(); JitTls::SetCompiler(pParam->pComp); // PREFIX_ASSUME gets turned into ASSERT_CHECK and we cannot have it here #if defined(_PREFAST_) || defined(_PREFIX_) PREFIX_ASSUME(pParam->pComp != NULL); #else assert(pParam->pComp != nullptr); #endif pParam->pComp->compInit(pParam->pAlloc, pParam->methodHnd, pParam->compHnd, pParam->methodInfo, pParam->inlineInfo); #ifdef DEBUG pParam->pComp->jitFallbackCompile = pParam->jitFallbackCompile; #endif // Now generate the code pParam->result = pParam->pComp->compCompile(pParam->classPtr, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); } finallyErrorTrap() { Compiler* pCompiler = pParamOuter->pComp; // If OOM is thrown when allocating memory for a pComp, we will end up here. // For this case, pComp and also pCompiler will be a nullptr // if (pCompiler != nullptr) { pCompiler->info.compCode = nullptr; // pop the compiler off the TLS stack only if it was linked above assert(JitTls::GetCompiler() == pCompiler); JitTls::SetCompiler(pCompiler->prevCompiler); } if (pParamOuter->inlineInfo == nullptr) { // Free up the allocator we were using pParamOuter->pAlloc->destroy(); } } endErrorTrap() } impJitErrorTrap() { // If we were looking at an inlinee.... if (inlineInfo != nullptr) { // Note that we failed to compile the inlinee, and that // there's no point trying to inline it again anywhere else. 
inlineInfo->inlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); } param.result = __errc; } endErrorTrap() result = param.result; if (!inlineInfo && (result == CORJIT_INTERNALERROR || result == CORJIT_RECOVERABLEERROR || result == CORJIT_IMPLLIMITATION) && !jitFallbackCompile) { // If we failed the JIT, reattempt with debuggable code. jitFallbackCompile = true; // Update the flags for 'safer' code generation. compileFlags->Set(JitFlags::JIT_FLAG_MIN_OPT); compileFlags->Clear(JitFlags::JIT_FLAG_SIZE_OPT); compileFlags->Clear(JitFlags::JIT_FLAG_SPEED_OPT); goto START; } return result; } #if defined(UNIX_AMD64_ABI) // GetTypeFromClassificationAndSizes: // Returns the type of the eightbyte accounting for the classification and size of the eightbyte. // // args: // classType: classification type // size: size of the eightbyte. // // static var_types Compiler::GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size) { var_types type = TYP_UNKNOWN; switch (classType) { case SystemVClassificationTypeInteger: if (size == 1) { type = TYP_BYTE; } else if (size <= 2) { type = TYP_SHORT; } else if (size <= 4) { type = TYP_INT; } else if (size <= 8) { type = TYP_LONG; } else { assert(false && "GetTypeFromClassificationAndSizes Invalid Integer classification type."); } break; case SystemVClassificationTypeIntegerReference: type = TYP_REF; break; case SystemVClassificationTypeIntegerByRef: type = TYP_BYREF; break; case SystemVClassificationTypeSSE: if (size <= 4) { type = TYP_FLOAT; } else if (size <= 8) { type = TYP_DOUBLE; } else { assert(false && "GetTypeFromClassificationAndSizes Invalid SSE classification type."); } break; default: assert(false && "GetTypeFromClassificationAndSizes Invalid classification type."); break; } return type; } //------------------------------------------------------------------- // GetEightByteType: Returns the type of eightbyte slot of a struct // // Arguments: // structDesc - struct classification description. // slotNum - eightbyte slot number for the struct. // // Return Value: // type of the eightbyte slot of the struct // // static var_types Compiler::GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum) { var_types eightByteType = TYP_UNDEF; unsigned len = structDesc.eightByteSizes[slotNum]; switch (structDesc.eightByteClassifications[slotNum]) { case SystemVClassificationTypeInteger: // See typelist.h for jit type definition. // All the types of size < 4 bytes are of jit type TYP_INT. 
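// For example (a sketch of the SysV classification, not an exhaustive mapping): a struct of
// two ints occupies one integer eightbyte of size 8 and maps to TYP_LONG here, while a
// struct of two floats forms a single SSE eightbyte of size 8 and maps to TYP_DOUBLE below.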
if (structDesc.eightByteSizes[slotNum] <= 4) { eightByteType = TYP_INT; } else if (structDesc.eightByteSizes[slotNum] <= 8) { eightByteType = TYP_LONG; } else { assert(false && "GetEightByteType Invalid Integer classification type."); } break; case SystemVClassificationTypeIntegerReference: assert(len == REGSIZE_BYTES); eightByteType = TYP_REF; break; case SystemVClassificationTypeIntegerByRef: assert(len == REGSIZE_BYTES); eightByteType = TYP_BYREF; break; case SystemVClassificationTypeSSE: if (structDesc.eightByteSizes[slotNum] <= 4) { eightByteType = TYP_FLOAT; } else if (structDesc.eightByteSizes[slotNum] <= 8) { eightByteType = TYP_DOUBLE; } else { assert(false && "GetEightByteType Invalid SSE classification type."); } break; default: assert(false && "GetEightByteType Invalid classification type."); break; } return eightByteType; } //------------------------------------------------------------------------------------------------------ // GetStructTypeOffset: Gets the type, size and offset of the eightbytes of a struct for System V systems. // // Arguments: // 'structDesc' - struct description // 'type0' - out param; returns the type of the first eightbyte. // 'type1' - out param; returns the type of the second eightbyte. // 'offset0' - out param; returns the offset of the first eightbyte. // 'offset1' - out param; returns the offset of the second eightbyte. // // static void Compiler::GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1) { *offset0 = structDesc.eightByteOffsets[0]; *offset1 = structDesc.eightByteOffsets[1]; *type0 = TYP_UNKNOWN; *type1 = TYP_UNKNOWN; // Set the first eightbyte data if (structDesc.eightByteCount >= 1) { *type0 = GetEightByteType(structDesc, 0); } // Set the second eight byte data if (structDesc.eightByteCount == 2) { *type1 = GetEightByteType(structDesc, 1); } } //------------------------------------------------------------------------------------------------------ // GetStructTypeOffset: Gets the type, size and offset of the eightbytes of a struct for System V systems. // // Arguments: // 'typeHnd' - type handle // 'type0' - out param; returns the type of the first eightbyte. // 'type1' - out param; returns the type of the second eightbyte. // 'offset0' - out param; returns the offset of the first eightbyte. // 'offset1' - out param; returns the offset of the second eightbyte. // void Compiler::GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1) { SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); assert(structDesc.passedInRegisters); GetStructTypeOffset(structDesc, type0, type1, offset0, offset1); } #endif // defined(UNIX_AMD64_ABI) /*****************************************************************************/ /*****************************************************************************/ #ifdef DEBUG Compiler::NodeToIntMap* Compiler::FindReachableNodesInNodeTestData() { NodeToIntMap* reachable = new (getAllocatorDebugOnly()) NodeToIntMap(getAllocatorDebugOnly()); if (m_nodeTestData == nullptr) { return reachable; } // Otherwise, iterate. 
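// Walk every tree of every statement; for calls, arguments that were moved to the late arg
// list are additionally resolved through fgArgInfo so annotations on those nodes still count
// as reachable.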
for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->NonPhiStatements()) { for (GenTree* const tree : stmt->TreeList()) { TestLabelAndNum tlAndN; // For call nodes, translate late args to what they stand for. if (tree->OperGet() == GT_CALL) { GenTreeCall* call = tree->AsCall(); unsigned i = 0; for (GenTreeCall::Use& use : call->Args()) { if ((use.GetNode()->gtFlags & GTF_LATE_ARG) != 0) { // Find the corresponding late arg. GenTree* lateArg = call->fgArgInfo->GetArgNode(i); if (GetNodeTestData()->Lookup(lateArg, &tlAndN)) { reachable->Set(lateArg, 0); } } i++; } } if (GetNodeTestData()->Lookup(tree, &tlAndN)) { reachable->Set(tree, 0); } } } } return reachable; } void Compiler::TransferTestDataToNode(GenTree* from, GenTree* to) { TestLabelAndNum tlAndN; // We can't currently associate multiple annotations with a single node. // If we need to, we can fix this... // If the table is null, don't create it just to do the lookup, which would fail... if (m_nodeTestData != nullptr && GetNodeTestData()->Lookup(from, &tlAndN)) { assert(!GetNodeTestData()->Lookup(to, &tlAndN)); // We can't currently associate multiple annotations with a single node. // If we need to, we can fix this... TestLabelAndNum tlAndNTo; assert(!GetNodeTestData()->Lookup(to, &tlAndNTo)); GetNodeTestData()->Remove(from); GetNodeTestData()->Set(to, tlAndN); } } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX jvc XX XX XX XX Functions for the stand-alone version of the JIT . XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ void codeGeneratorCodeSizeBeg() { } /***************************************************************************** * * Used for counting pointer assignments. 
*/ /*****************************************************************************/ void codeGeneratorCodeSizeEnd() { } /***************************************************************************** * * Gather statistics - mainly used for the standalone * Enable various #ifdef's to get the information you need */ void Compiler::compJitStats() { #if CALL_ARG_STATS /* Method types and argument statistics */ compCallArgStats(); #endif // CALL_ARG_STATS } #if CALL_ARG_STATS /***************************************************************************** * * Gather statistics about method calls and arguments */ void Compiler::compCallArgStats() { unsigned argNum; unsigned argDWordNum; unsigned argLngNum; unsigned argFltNum; unsigned argDblNum; unsigned regArgNum; unsigned regArgDeferred; unsigned regArgTemp; unsigned regArgLclVar; unsigned regArgConst; unsigned argTempsThisMethod = 0; assert(fgStmtListThreaded); for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { for (GenTree* const call : stmt->TreeList()) { if (call->gtOper != GT_CALL) continue; argNum = regArgNum = regArgDeferred = regArgTemp = regArgConst = regArgLclVar = argDWordNum = argLngNum = argFltNum = argDblNum = 0; argTotalCalls++; if (call->AsCall()->gtCallThisArg == nullptr) { if (call->AsCall()->gtCallType == CT_HELPER) { argHelperCalls++; } else { argStaticCalls++; } } else { /* We have a 'this' pointer */ argDWordNum++; argNum++; regArgNum++; regArgDeferred++; argTotalObjPtr++; if (call->AsCall()->IsVirtual()) { /* virtual function */ argVirtualCalls++; } else { argNonVirtualCalls++; } } } } } argTempsCntTable.record(argTempsThisMethod); if (argMaxTempsPerMethod < argTempsThisMethod) { argMaxTempsPerMethod = argTempsThisMethod; } } /* static */ void Compiler::compDispCallArgStats(FILE* fout) { if (argTotalCalls == 0) return; fprintf(fout, "\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Call stats\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Total # of calls = %d, calls / method = %.3f\n\n", argTotalCalls, (float)argTotalCalls / genMethodCnt); fprintf(fout, "Percentage of helper calls = %4.2f %%\n", (float)(100 * argHelperCalls) / argTotalCalls); fprintf(fout, "Percentage of static calls = %4.2f %%\n", (float)(100 * argStaticCalls) / argTotalCalls); fprintf(fout, "Percentage of virtual calls = %4.2f %%\n", (float)(100 * argVirtualCalls) / argTotalCalls); fprintf(fout, "Percentage of non-virtual calls = %4.2f %%\n\n", (float)(100 * argNonVirtualCalls) / argTotalCalls); fprintf(fout, "Average # of arguments per call = %.2f%%\n\n", (float)argTotalArgs / argTotalCalls); fprintf(fout, "Percentage of DWORD arguments = %.2f %%\n", (float)(100 * argTotalDWordArgs) / argTotalArgs); fprintf(fout, "Percentage of LONG arguments = %.2f %%\n", (float)(100 * argTotalLongArgs) / argTotalArgs); fprintf(fout, "Percentage of FLOAT arguments = %.2f %%\n", (float)(100 * argTotalFloatArgs) / argTotalArgs); fprintf(fout, "Percentage of DOUBLE arguments = %.2f %%\n\n", (float)(100 * argTotalDoubleArgs) / argTotalArgs); if (argTotalRegArgs == 0) return; /* fprintf(fout, "Total deferred arguments = %d \n", argTotalDeferred); fprintf(fout, "Total temp arguments = %d \n\n", argTotalTemps); fprintf(fout, "Total 'this' arguments = %d \n", argTotalObjPtr); fprintf(fout, "Total local var arguments = %d \n", argTotalLclVar); fprintf(fout, "Total constant arguments = %d \n\n", argTotalConst); */ fprintf(fout, "\nRegister 
Arguments:\n\n"); fprintf(fout, "Percentage of deferred arguments = %.2f %%\n", (float)(100 * argTotalDeferred) / argTotalRegArgs); fprintf(fout, "Percentage of temp arguments = %.2f %%\n\n", (float)(100 * argTotalTemps) / argTotalRegArgs); fprintf(fout, "Maximum # of temps per method = %d\n\n", argMaxTempsPerMethod); fprintf(fout, "Percentage of ObjPtr arguments = %.2f %%\n", (float)(100 * argTotalObjPtr) / argTotalRegArgs); // fprintf(fout, "Percentage of global arguments = %.2f %%\n", (float)(100 * argTotalDWordGlobEf) / // argTotalRegArgs); fprintf(fout, "Percentage of constant arguments = %.2f %%\n", (float)(100 * argTotalConst) / argTotalRegArgs); fprintf(fout, "Percentage of lcl var arguments = %.2f %%\n\n", (float)(100 * argTotalLclVar) / argTotalRegArgs); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Argument count frequency table (includes ObjPtr):\n"); fprintf(fout, "--------------------------------------------------\n"); argCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "DWORD argument count frequency table (w/o LONG):\n"); fprintf(fout, "--------------------------------------------------\n"); argDWordCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Temps count frequency table (per method):\n"); fprintf(fout, "--------------------------------------------------\n"); argTempsCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); /* fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "DWORD argument count frequency table (w/ LONG):\n"); fprintf(fout, "--------------------------------------------------\n"); argDWordLngCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); */ } #endif // CALL_ARG_STATS // JIT time end to end, and by phases. 
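// Each JitTimer instance below records one method's per-phase cycle counts; the results are
// folded into the process-wide CompTimeSummaryInfo under s_compTimeSummaryLock (see AddInfo).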
#ifdef FEATURE_JIT_METHOD_PERF // Static variables CritSecObject CompTimeSummaryInfo::s_compTimeSummaryLock; CompTimeSummaryInfo CompTimeSummaryInfo::s_compTimeSummary; #if MEASURE_CLRAPI_CALLS double JitTimer::s_cyclesPerSec = CachedCyclesPerSecond(); #endif #endif // FEATURE_JIT_METHOD_PERF #if defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS || defined(FEATURE_TRACELOGGING) const char* PhaseNames[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) string_nm, #include "compphases.h" }; const char* PhaseEnums[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) #enum_nm, #include "compphases.h" }; const LPCWSTR PhaseShortNames[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) W(short_nm), #include "compphases.h" }; #endif // defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS #ifdef FEATURE_JIT_METHOD_PERF bool PhaseHasChildren[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) hasChildren, #include "compphases.h" }; int PhaseParent[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) parent, #include "compphases.h" }; bool PhaseReportsIRSize[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) measureIR, #include "compphases.h" }; CompTimeInfo::CompTimeInfo(unsigned byteCodeBytes) : m_byteCodeBytes(byteCodeBytes) , m_totalCycles(0) , m_parentPhaseEndSlop(0) , m_timerFailure(false) #if MEASURE_CLRAPI_CALLS , m_allClrAPIcalls(0) , m_allClrAPIcycles(0) #endif { for (int i = 0; i < PHASE_NUMBER_OF; i++) { m_invokesByPhase[i] = 0; m_cyclesByPhase[i] = 0; #if MEASURE_CLRAPI_CALLS m_CLRinvokesByPhase[i] = 0; m_CLRcyclesByPhase[i] = 0; #endif } #if MEASURE_CLRAPI_CALLS assert(ArrLen(m_perClrAPIcalls) == API_ICorJitInfo_Names::API_COUNT); assert(ArrLen(m_perClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); assert(ArrLen(m_maxClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { m_perClrAPIcalls[i] = 0; m_perClrAPIcycles[i] = 0; m_maxClrAPIcycles[i] = 0; } #endif } bool CompTimeSummaryInfo::IncludedInFilteredData(CompTimeInfo& info) { return false; // info.m_byteCodeBytes < 10; } //------------------------------------------------------------------------ // CompTimeSummaryInfo::AddInfo: Record timing info from one compile. // // Arguments: // info - The timing information to record. // includePhases - If "true", the per-phase info in "info" is valid, // which means that a "normal" compile has ended; if // the value is "false" we are recording the results // of a partial compile (typically an import-only run // on behalf of the inliner) in which case the phase // info is not valid and so we only record EE call // overhead. void CompTimeSummaryInfo::AddInfo(CompTimeInfo& info, bool includePhases) { if (info.m_timerFailure) { return; // Don't update if there was a failure. } CritSecHolder timeLock(s_compTimeSummaryLock); if (includePhases) { bool includeInFiltered = IncludedInFilteredData(info); m_numMethods++; // Update the totals and maxima. m_total.m_byteCodeBytes += info.m_byteCodeBytes; m_maximum.m_byteCodeBytes = max(m_maximum.m_byteCodeBytes, info.m_byteCodeBytes); m_total.m_totalCycles += info.m_totalCycles; m_maximum.m_totalCycles = max(m_maximum.m_totalCycles, info.m_totalCycles); #if MEASURE_CLRAPI_CALLS // Update the CLR-API values. 
m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); #endif if (includeInFiltered) { m_numFilteredMethods++; m_filtered.m_byteCodeBytes += info.m_byteCodeBytes; m_filtered.m_totalCycles += info.m_totalCycles; m_filtered.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; } for (int i = 0; i < PHASE_NUMBER_OF; i++) { m_total.m_invokesByPhase[i] += info.m_invokesByPhase[i]; m_total.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; #if MEASURE_CLRAPI_CALLS m_total.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; m_total.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; #endif if (includeInFiltered) { m_filtered.m_invokesByPhase[i] += info.m_invokesByPhase[i]; m_filtered.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; #if MEASURE_CLRAPI_CALLS m_filtered.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; m_filtered.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; #endif } m_maximum.m_cyclesByPhase[i] = max(m_maximum.m_cyclesByPhase[i], info.m_cyclesByPhase[i]); #if MEASURE_CLRAPI_CALLS m_maximum.m_CLRcyclesByPhase[i] = max(m_maximum.m_CLRcyclesByPhase[i], info.m_CLRcyclesByPhase[i]); #endif } m_total.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; m_maximum.m_parentPhaseEndSlop = max(m_maximum.m_parentPhaseEndSlop, info.m_parentPhaseEndSlop); } #if MEASURE_CLRAPI_CALLS else { m_totMethods++; // Update the "global" CLR-API values. m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); // Update the per-phase CLR-API values. m_total.m_invokesByPhase[PHASE_CLR_API] += info.m_allClrAPIcalls; m_maximum.m_invokesByPhase[PHASE_CLR_API] = max(m_maximum.m_perClrAPIcalls[PHASE_CLR_API], info.m_allClrAPIcalls); m_total.m_cyclesByPhase[PHASE_CLR_API] += info.m_allClrAPIcycles; m_maximum.m_cyclesByPhase[PHASE_CLR_API] = max(m_maximum.m_cyclesByPhase[PHASE_CLR_API], info.m_allClrAPIcycles); } for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { m_total.m_perClrAPIcalls[i] += info.m_perClrAPIcalls[i]; m_maximum.m_perClrAPIcalls[i] = max(m_maximum.m_perClrAPIcalls[i], info.m_perClrAPIcalls[i]); m_total.m_perClrAPIcycles[i] += info.m_perClrAPIcycles[i]; m_maximum.m_perClrAPIcycles[i] = max(m_maximum.m_perClrAPIcycles[i], info.m_perClrAPIcycles[i]); m_maximum.m_maxClrAPIcycles[i] = max(m_maximum.m_maxClrAPIcycles[i], info.m_maxClrAPIcycles[i]); } #endif } // Static LPCWSTR Compiler::compJitTimeLogFilename = nullptr; void CompTimeSummaryInfo::Print(FILE* f) { if (f == nullptr) { return; } // Otherwise... 
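// All counters are raw cycle counts; convert to milliseconds using the cached processor
// frequency, and give up if no high-frequency timer is available.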
double countsPerSec = CachedCyclesPerSecond(); if (countsPerSec == 0.0) { fprintf(f, "Processor does not have a high-frequency timer.\n"); return; } double totTime_ms = 0.0; fprintf(f, "JIT Compilation time report:\n"); fprintf(f, " Compiled %d methods.\n", m_numMethods); if (m_numMethods != 0) { fprintf(f, " Compiled %d bytecodes total (%d max, %8.2f avg).\n", m_total.m_byteCodeBytes, m_maximum.m_byteCodeBytes, (double)m_total.m_byteCodeBytes / (double)m_numMethods); totTime_ms = ((double)m_total.m_totalCycles / countsPerSec) * 1000.0; fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_total.m_totalCycles / 1000000.0), totTime_ms); fprintf(f, " max: %10.3f Mcycles/%10.3f ms\n", ((double)m_maximum.m_totalCycles) / 1000000.0, ((double)m_maximum.m_totalCycles / countsPerSec) * 1000.0); fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n", ((double)m_total.m_totalCycles) / 1000000.0 / (double)m_numMethods, totTime_ms / (double)m_numMethods); const char* extraHdr1 = ""; const char* extraHdr2 = ""; #if MEASURE_CLRAPI_CALLS bool extraInfo = (JitConfig.JitEECallTimingInfo() != 0); if (extraInfo) { extraHdr1 = " CLRs/meth % in CLR"; extraHdr2 = "-----------------------"; } #endif fprintf(f, "\n Total time by phases:\n"); fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total max (ms)%s\n", extraHdr1); fprintf(f, " ---------------------------------------------------------------------------------------%s\n", extraHdr2); // Ensure that at least the names array and the Phases enum have the same number of entries: assert(ArrLen(PhaseNames) == PHASE_NUMBER_OF); for (int i = 0; i < PHASE_NUMBER_OF; i++) { double phase_tot_ms = (((double)m_total.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; double phase_max_ms = (((double)m_maximum.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; #if MEASURE_CLRAPI_CALLS // Skip showing CLR API call info if we didn't collect any if (i == PHASE_CLR_API && !extraInfo) continue; #endif // Indent nested phases, according to depth. 
int ancPhase = PhaseParent[i]; while (ancPhase != -1) { fprintf(f, " "); ancPhase = PhaseParent[ancPhase]; } fprintf(f, " %-30s %6.2f %10.2f %9.3f %8.2f%% %8.3f", PhaseNames[i], ((double)m_total.m_invokesByPhase[i]) / ((double)m_numMethods), ((double)m_total.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms), phase_max_ms); #if MEASURE_CLRAPI_CALLS if (extraInfo && i != PHASE_CLR_API) { double nest_tot_ms = (((double)m_total.m_CLRcyclesByPhase[i]) / countsPerSec) * 1000.0; double nest_percent = nest_tot_ms * 100.0 / totTime_ms; double calls_per_fn = ((double)m_total.m_CLRinvokesByPhase[i]) / ((double)m_numMethods); if (nest_percent > 0.1 || calls_per_fn > 10) fprintf(f, " %5.1f %8.2f%%", calls_per_fn, nest_percent); } #endif fprintf(f, "\n"); } // Show slop if it's over a certain percentage of the total double pslop_pct = 100.0 * m_total.m_parentPhaseEndSlop * 1000.0 / countsPerSec / totTime_ms; if (pslop_pct >= 1.0) { fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " "%3.1f%% of total.\n\n", m_total.m_parentPhaseEndSlop / 1000000.0, pslop_pct); } } if (m_numFilteredMethods > 0) { fprintf(f, " Compiled %d methods that meet the filter requirement.\n", m_numFilteredMethods); fprintf(f, " Compiled %d bytecodes total (%8.2f avg).\n", m_filtered.m_byteCodeBytes, (double)m_filtered.m_byteCodeBytes / (double)m_numFilteredMethods); double totTime_ms = ((double)m_filtered.m_totalCycles / countsPerSec) * 1000.0; fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_filtered.m_totalCycles / 1000000.0), totTime_ms); fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n", ((double)m_filtered.m_totalCycles) / 1000000.0 / (double)m_numFilteredMethods, totTime_ms / (double)m_numFilteredMethods); fprintf(f, " Total time by phases:\n"); fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total\n"); fprintf(f, " --------------------------------------------------------------------------------------\n"); // Ensure that at least the names array and the Phases enum have the same number of entries: assert(ArrLen(PhaseNames) == PHASE_NUMBER_OF); for (int i = 0; i < PHASE_NUMBER_OF; i++) { double phase_tot_ms = (((double)m_filtered.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; // Indent nested phases, according to depth. 
int ancPhase = PhaseParent[i]; while (ancPhase != -1) { fprintf(f, " "); ancPhase = PhaseParent[ancPhase]; } fprintf(f, " %-30s %5.2f %10.2f %9.3f %8.2f%%\n", PhaseNames[i], ((double)m_filtered.m_invokesByPhase[i]) / ((double)m_numFilteredMethods), ((double)m_filtered.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms)); } double fslop_ms = m_filtered.m_parentPhaseEndSlop * 1000.0 / countsPerSec; if (fslop_ms > 1.0) { fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " "%3.1f%% of total.\n\n", m_filtered.m_parentPhaseEndSlop / 1000000.0, fslop_ms); } } #if MEASURE_CLRAPI_CALLS if (m_total.m_allClrAPIcalls > 0 && m_total.m_allClrAPIcycles > 0) { fprintf(f, "\n"); if (m_totMethods > 0) fprintf(f, " Imported %u methods.\n\n", m_numMethods + m_totMethods); fprintf(f, " CLR API # calls total time max time avg time %% " "of total\n"); fprintf(f, " -------------------------------------------------------------------------------"); fprintf(f, "---------------------\n"); static const char* APInames[] = { #define DEF_CLR_API(name) #name, #include "ICorJitInfo_API_names.h" }; unsigned shownCalls = 0; double shownMillis = 0.0; #ifdef DEBUG unsigned checkedCalls = 0; double checkedMillis = 0.0; #endif for (unsigned pass = 0; pass < 2; pass++) { for (unsigned i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { unsigned calls = m_total.m_perClrAPIcalls[i]; if (calls == 0) continue; unsigned __int64 cycles = m_total.m_perClrAPIcycles[i]; double millis = 1000.0 * cycles / countsPerSec; // Don't show the small fry to keep the results manageable if (millis < 0.5) { // We always show the following API because it is always called // exactly once for each method and its body is the simplest one // possible (it just returns an integer constant), and therefore // it can be used to measure the overhead of adding the CLR API // timing code. Roughly speaking, on a 3GHz x64 box the overhead // per call should be around 40 ns when using RDTSC, compared to // about 140 ns when using GetThreadCycles() under Windows. if (i != API_ICorJitInfo_Names::API_getExpectedTargetArchitecture) continue; } // In the first pass we just compute the totals. 
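// (The totals are needed so the per-API "% of total" column printed in the second pass can
// be computed against the time of the calls actually shown.)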
if (pass == 0) { shownCalls += m_total.m_perClrAPIcalls[i]; shownMillis += millis; continue; } unsigned __int32 maxcyc = m_maximum.m_maxClrAPIcycles[i]; double max_ms = 1000.0 * maxcyc / countsPerSec; fprintf(f, " %-40s", APInames[i]); // API name fprintf(f, " %8u %9.1f ms", calls, millis); // #calls, total time fprintf(f, " %8.1f ms %8.1f ns", max_ms, 1000000.0 * millis / calls); // max, avg time fprintf(f, " %5.1f%%\n", 100.0 * millis / shownMillis); // % of total #ifdef DEBUG checkedCalls += m_total.m_perClrAPIcalls[i]; checkedMillis += millis; #endif } } #ifdef DEBUG assert(checkedCalls == shownCalls); assert(checkedMillis == shownMillis); #endif if (shownCalls > 0 || shownMillis > 0) { fprintf(f, " -------------------------"); fprintf(f, "---------------------------------------------------------------------------\n"); fprintf(f, " Total for calls shown above %8u %10.1f ms", shownCalls, shownMillis); if (totTime_ms > 0.0) fprintf(f, " (%4.1lf%% of overall JIT time)", shownMillis * 100.0 / totTime_ms); fprintf(f, "\n"); } fprintf(f, "\n"); } #endif fprintf(f, "\n"); } JitTimer::JitTimer(unsigned byteCodeSize) : m_info(byteCodeSize) { #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; m_CLRcallCycles = 0; #endif #ifdef DEBUG m_lastPhase = (Phases)-1; #if MEASURE_CLRAPI_CALLS m_CLRcallAPInum = -1; #endif #endif unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { m_start = threadCurCycles; m_curPhaseStart = threadCurCycles; } } void JitTimer::EndPhase(Compiler* compiler, Phases phase) { // Otherwise... // We re-run some phases currently, so this following assert doesn't work. // assert((int)phase > (int)m_lastPhase); // We should end phases in increasing order. unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { unsigned __int64 phaseCycles = (threadCurCycles - m_curPhaseStart); // If this is not a leaf phase, the assumption is that the last subphase must have just recently ended. // Credit the duration to "slop", the total of which should be very small. if (PhaseHasChildren[phase]) { m_info.m_parentPhaseEndSlop += phaseCycles; } else { // It is a leaf phase. Credit duration to it. m_info.m_invokesByPhase[phase]++; m_info.m_cyclesByPhase[phase] += phaseCycles; #if MEASURE_CLRAPI_CALLS // Record the CLR API timing info as well. m_info.m_CLRinvokesByPhase[phase] += m_CLRcallInvokes; m_info.m_CLRcyclesByPhase[phase] += m_CLRcallCycles; #endif // Credit the phase's ancestors, if any. int ancPhase = PhaseParent[phase]; while (ancPhase != -1) { m_info.m_cyclesByPhase[ancPhase] += phaseCycles; ancPhase = PhaseParent[ancPhase]; } #if MEASURE_CLRAPI_CALLS const Phases lastPhase = PHASE_CLR_API; #else const Phases lastPhase = PHASE_NUMBER_OF; #endif if (phase + 1 == lastPhase) { m_info.m_totalCycles = (threadCurCycles - m_start); } else { m_curPhaseStart = threadCurCycles; } } if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[phase]) { m_info.m_nodeCountAfterPhase[phase] = compiler->fgMeasureIR(); } else { m_info.m_nodeCountAfterPhase[phase] = 0; } } #ifdef DEBUG m_lastPhase = phase; #endif #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; m_CLRcallCycles = 0; #endif } #if MEASURE_CLRAPI_CALLS //------------------------------------------------------------------------ // JitTimer::CLRApiCallEnter: Start the stopwatch for an EE call. // // Arguments: // apix - The API index - an "enum API_ICorJitInfo_Names" value. 
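//
// Notes:
//    Calls must be paired with a matching CLRApiCallLeave and may not nest: Enter records the
//    starting cycle count, and Leave charges the elapsed cycles to PHASE_CLR_API and to the
//    per-API totals for 'apix'.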
// void JitTimer::CLRApiCallEnter(unsigned apix) { assert(m_CLRcallAPInum == -1); // Nested calls not allowed m_CLRcallAPInum = apix; // If we can't get the cycles, we'll just ignore this call if (!_our_GetThreadCycles(&m_CLRcallStart)) m_CLRcallStart = 0; } //------------------------------------------------------------------------ // JitTimer::CLRApiCallLeave: compute / record time spent in an EE call. // // Arguments: // apix - The API's "enum API_ICorJitInfo_Names" value; this value // should match the value passed to the most recent call to // "CLRApiCallEnter" (i.e. these must come as matched pairs), // and they also may not nest. // void JitTimer::CLRApiCallLeave(unsigned apix) { // Make sure we're actually inside a measured CLR call. assert(m_CLRcallAPInum != -1); m_CLRcallAPInum = -1; // Ignore this one if we don't have a valid starting counter. if (m_CLRcallStart != 0) { if (JitConfig.JitEECallTimingInfo() != 0) { unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { // Compute the cycles spent in the call. threadCurCycles -= m_CLRcallStart; // Add the cycles to the 'phase' and bump its use count. m_info.m_cyclesByPhase[PHASE_CLR_API] += threadCurCycles; m_info.m_invokesByPhase[PHASE_CLR_API] += 1; // Add the values to the "per API" info. m_info.m_allClrAPIcycles += threadCurCycles; m_info.m_allClrAPIcalls += 1; m_info.m_perClrAPIcalls[apix] += 1; m_info.m_perClrAPIcycles[apix] += threadCurCycles; m_info.m_maxClrAPIcycles[apix] = max(m_info.m_maxClrAPIcycles[apix], (unsigned __int32)threadCurCycles); // Subtract the cycles from the enclosing phase by bumping its start time m_curPhaseStart += threadCurCycles; // Update the running totals. m_CLRcallInvokes += 1; m_CLRcallCycles += threadCurCycles; } } m_CLRcallStart = 0; } assert(m_CLRcallAPInum != -1); // No longer in this API call. m_CLRcallAPInum = -1; } #endif // MEASURE_CLRAPI_CALLS CritSecObject JitTimer::s_csvLock; // It's expensive to constantly open and close the file, so open it once and close it // when the process exits. This should be accessed under the s_csvLock. FILE* JitTimer::s_csvFile = nullptr; LPCWSTR Compiler::JitTimeLogCsv() { LPCWSTR jitTimeLogCsv = JitConfig.JitTimeLogCsv(); return jitTimeLogCsv; } void JitTimer::PrintCsvHeader() { LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); if (jitTimeLogCsv == nullptr) { return; } CritSecHolder csvLock(s_csvLock); if (s_csvFile == nullptr) { s_csvFile = _wfopen(jitTimeLogCsv, W("a")); } if (s_csvFile != nullptr) { // Seek to the end of the file s.t. 
`ftell` doesn't lie to us on Windows fseek(s_csvFile, 0, SEEK_END); // Write the header if the file is empty if (ftell(s_csvFile) == 0) { fprintf(s_csvFile, "\"Method Name\","); fprintf(s_csvFile, "\"Assembly or SPMI Index\","); fprintf(s_csvFile, "\"IL Bytes\","); fprintf(s_csvFile, "\"Basic Blocks\","); fprintf(s_csvFile, "\"Min Opts\","); fprintf(s_csvFile, "\"Loops\","); fprintf(s_csvFile, "\"Loops Cloned\","); #if FEATURE_LOOP_ALIGN #ifdef DEBUG fprintf(s_csvFile, "\"Alignment Candidates\","); fprintf(s_csvFile, "\"Loops Aligned\","); #endif // DEBUG #endif // FEATURE_LOOP_ALIGN for (int i = 0; i < PHASE_NUMBER_OF; i++) { fprintf(s_csvFile, "\"%s\",", PhaseNames[i]); if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) { fprintf(s_csvFile, "\"Node Count After %s\",", PhaseNames[i]); } } InlineStrategy::DumpCsvHeader(s_csvFile); fprintf(s_csvFile, "\"Executable Code Bytes\","); fprintf(s_csvFile, "\"GC Info Bytes\","); fprintf(s_csvFile, "\"Total Bytes Allocated\","); fprintf(s_csvFile, "\"Total Cycles\","); fprintf(s_csvFile, "\"CPS\"\n"); fflush(s_csvFile); } } } void JitTimer::PrintCsvMethodStats(Compiler* comp) { LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); if (jitTimeLogCsv == nullptr) { return; } // eeGetMethodFullName uses locks, so don't enter crit sec before this call. #if defined(DEBUG) || defined(LATE_DISASM) // If we already have computed the name because for some reason we're generating the CSV // for a DEBUG build (presumably not for the time info), just re-use it. const char* methName = comp->info.compFullName; #else const char* methName = comp->eeGetMethodFullName(comp->info.compMethodHnd); #endif // Try and access the SPMI index to report in the data set. // // If the jit is not hosted under SPMI this will return the // default value of zero. // // Query the jit host directly here instead of going via the // config cache, since value will change for each method. 
int index = g_jitHost->getIntConfigValue(W("SuperPMIMethodContextNumber"), -1); CritSecHolder csvLock(s_csvLock); if (s_csvFile == nullptr) { return; } fprintf(s_csvFile, "\"%s\",", methName); if (index != 0) { fprintf(s_csvFile, "%d,", index); } else { const char* methodAssemblyName = comp->info.compCompHnd->getAssemblyName( comp->info.compCompHnd->getModuleAssembly(comp->info.compCompHnd->getClassModule(comp->info.compClassHnd))); fprintf(s_csvFile, "\"%s\",", methodAssemblyName); } fprintf(s_csvFile, "%u,", comp->info.compILCodeSize); fprintf(s_csvFile, "%u,", comp->fgBBcount); fprintf(s_csvFile, "%u,", comp->opts.MinOpts()); fprintf(s_csvFile, "%u,", comp->optLoopCount); fprintf(s_csvFile, "%u,", comp->optLoopsCloned); #if FEATURE_LOOP_ALIGN #ifdef DEBUG fprintf(s_csvFile, "%u,", comp->loopAlignCandidates); fprintf(s_csvFile, "%u,", comp->loopsAligned); #endif // DEBUG #endif // FEATURE_LOOP_ALIGN unsigned __int64 totCycles = 0; for (int i = 0; i < PHASE_NUMBER_OF; i++) { if (!PhaseHasChildren[i]) { totCycles += m_info.m_cyclesByPhase[i]; } fprintf(s_csvFile, "%I64u,", m_info.m_cyclesByPhase[i]); if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) { fprintf(s_csvFile, "%u,", m_info.m_nodeCountAfterPhase[i]); } } comp->m_inlineStrategy->DumpCsvData(s_csvFile); fprintf(s_csvFile, "%u,", comp->info.compNativeCodeSize); fprintf(s_csvFile, "%Iu,", comp->compInfoBlkSize); fprintf(s_csvFile, "%Iu,", comp->compGetArenaAllocator()->getTotalBytesAllocated()); fprintf(s_csvFile, "%I64u,", m_info.m_totalCycles); fprintf(s_csvFile, "%f\n", CachedCyclesPerSecond()); fflush(s_csvFile); } // Perform process shutdown actions. // // static void JitTimer::Shutdown() { CritSecHolder csvLock(s_csvLock); if (s_csvFile != nullptr) { fclose(s_csvFile); } } // Completes the timing of the current method, and adds it to "sum". void JitTimer::Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases) { if (includePhases) { PrintCsvMethodStats(comp); } sum.AddInfo(m_info, includePhases); } #endif // FEATURE_JIT_METHOD_PERF #if LOOP_HOIST_STATS // Static fields. CritSecObject Compiler::s_loopHoistStatsLock; // Default constructor. unsigned Compiler::s_loopsConsidered = 0; unsigned Compiler::s_loopsWithHoistedExpressions = 0; unsigned Compiler::s_totalHoistedExpressions = 0; // static void Compiler::PrintAggregateLoopHoistStats(FILE* f) { fprintf(f, "\n"); fprintf(f, "---------------------------------------------------\n"); fprintf(f, "Loop hoisting stats\n"); fprintf(f, "---------------------------------------------------\n"); double pctWithHoisted = 0.0; if (s_loopsConsidered > 0) { pctWithHoisted = 100.0 * (double(s_loopsWithHoistedExpressions) / double(s_loopsConsidered)); } double exprsPerLoopWithExpr = 0.0; if (s_loopsWithHoistedExpressions > 0) { exprsPerLoopWithExpr = double(s_totalHoistedExpressions) / double(s_loopsWithHoistedExpressions); } fprintf(f, "Considered %d loops. 
Of these, we hoisted expressions out of %d (%6.2f%%).\n", s_loopsConsidered, s_loopsWithHoistedExpressions, pctWithHoisted); fprintf(f, " A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n", s_totalHoistedExpressions, exprsPerLoopWithExpr); } void Compiler::AddLoopHoistStats() { CritSecHolder statsLock(s_loopHoistStatsLock); s_loopsConsidered += m_loopsConsidered; s_loopsWithHoistedExpressions += m_loopsWithHoistedExpressions; s_totalHoistedExpressions += m_totalHoistedExpressions; } void Compiler::PrintPerMethodLoopHoistStats() { double pctWithHoisted = 0.0; if (m_loopsConsidered > 0) { pctWithHoisted = 100.0 * (double(m_loopsWithHoistedExpressions) / double(m_loopsConsidered)); } double exprsPerLoopWithExpr = 0.0; if (m_loopsWithHoistedExpressions > 0) { exprsPerLoopWithExpr = double(m_totalHoistedExpressions) / double(m_loopsWithHoistedExpressions); } printf("Considered %d loops. Of these, we hoisted expressions out of %d (%5.2f%%).\n", m_loopsConsidered, m_loopsWithHoistedExpressions, pctWithHoisted); printf(" A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n", m_totalHoistedExpressions, exprsPerLoopWithExpr); } #endif // LOOP_HOIST_STATS //------------------------------------------------------------------------ // RecordStateAtEndOfInlining: capture timing data (if enabled) after // inlining as completed. // // Note: // Records data needed for SQM and inlining data dumps. Should be // called after inlining is complete. (We do this after inlining // because this marks the last point at which the JIT is likely to // cause type-loading and class initialization). void Compiler::RecordStateAtEndOfInlining() { #if defined(DEBUG) || defined(INLINE_DATA) m_compCyclesAtEndOfInlining = 0; m_compTickCountAtEndOfInlining = 0; bool b = CycleTimer::GetThreadCyclesS(&m_compCyclesAtEndOfInlining); if (!b) { return; // We don't have a thread cycle counter. } m_compTickCountAtEndOfInlining = GetTickCount(); #endif // defined(DEBUG) || defined(INLINE_DATA) } //------------------------------------------------------------------------ // RecordStateAtEndOfCompilation: capture timing data (if enabled) after // compilation is completed. void Compiler::RecordStateAtEndOfCompilation() { #if defined(DEBUG) || defined(INLINE_DATA) // Common portion m_compCycles = 0; unsigned __int64 compCyclesAtEnd; bool b = CycleTimer::GetThreadCyclesS(&compCyclesAtEnd); if (!b) { return; // We don't have a thread cycle counter. } assert(compCyclesAtEnd >= m_compCyclesAtEndOfInlining); m_compCycles = compCyclesAtEnd - m_compCyclesAtEndOfInlining; #endif // defined(DEBUG) || defined(INLINE_DATA) } #if FUNC_INFO_LOGGING // static LPCWSTR Compiler::compJitFuncInfoFilename = nullptr; // static FILE* Compiler::compJitFuncInfoFile = nullptr; #endif // FUNC_INFO_LOGGING #ifdef DEBUG // dumpConvertedVarSet() dumps the varset bits that are tracked // variable indices, and we convert them to variable numbers, sort the variable numbers, and // print them as variable numbers. To do this, we use a temporary set indexed by // variable number. We can't use the "all varset" type because it is still size-limited, and might // not be big enough to handle all possible variable numbers. void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars) { BYTE* pVarNumSet; // trivial set: one byte per varNum, 0 means not in set, 1 means in set. 
size_t varNumSetBytes = comp->lvaCount * sizeof(BYTE); pVarNumSet = (BYTE*)_alloca(varNumSetBytes); memset(pVarNumSet, 0, varNumSetBytes); // empty the set VarSetOps::Iter iter(comp, vars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = comp->lvaTrackedIndexToLclNum(varIndex); pVarNumSet[varNum] = 1; // This varNum is in the set } bool first = true; printf("{"); for (size_t varNum = 0; varNum < comp->lvaCount; varNum++) { if (pVarNumSet[varNum] == 1) { if (!first) { printf(" "); } printf("V%02u", varNum); first = false; } } printf("}"); } /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Debugging helpers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ /* The following functions are intended to be called from the debugger, to dump * various data structures. * * The versions that start with 'c' take a Compiler* as the first argument. * The versions that start with 'd' use the tlsCompiler, so don't require a Compiler*. * * Summary: * cBlock, dBlock : Display a basic block (call fgTableDispBasicBlock()). * cBlocks, dBlocks : Display all the basic blocks of a function (call fgDispBasicBlocks()). * cBlocksV, dBlocksV : Display all the basic blocks of a function (call fgDispBasicBlocks(true)). * "V" means "verbose", and will dump all the trees. * cStmt, dStmt : Display a Statement (call gtDispStmt()). * cTree, dTree : Display a tree (call gtDispTree()). * cTreeLIR, dTreeLIR : Display a tree in LIR form (call gtDispLIRNode()). * cTrees, dTrees : Display all the trees in a function (call fgDumpTrees()). * cEH, dEH : Display the EH handler table (call fgDispHandlerTab()). * cVar, dVar : Display a local variable given its number (call lvaDumpEntry()). * cVarDsc, dVarDsc : Display a local variable given a LclVarDsc* (call lvaDumpEntry()). * cVars, dVars : Display the local variable table (call lvaTableDump()). * cVarsFinal, dVarsFinal : Display the local variable table (call lvaTableDump(FINAL_FRAME_LAYOUT)). * cBlockCheapPreds, dBlockCheapPreds : Display a block's cheap predecessors (call block->dspCheapPreds()). * cBlockPreds, dBlockPreds : Display a block's predecessors (call block->dspPreds()). * cBlockSuccs, dBlockSuccs : Display a block's successors (call block->dspSuccs(compiler)). * cReach, dReach : Display all block reachability (call fgDispReach()). * cDoms, dDoms : Display all block dominators (call fgDispDoms()). * cLiveness, dLiveness : Display per-block variable liveness (call fgDispBBLiveness()). * cCVarSet, dCVarSet : Display a "converted" VARSET_TP: the varset is assumed to be tracked variable * indices. These are converted to variable numbers and sorted. (Calls * dumpConvertedVarSet()). * cLoop, dLoop : Display the blocks of a loop, including the trees. * cTreeFlags, dTreeFlags : Display tree flags * * The following don't require a Compiler* to work: * dRegMask : Display a regMaskTP (call dspRegMask(mask)). * dBlockList : Display a BasicBlockList*. 
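 *
 * Example (illustrative): from the debugger, "dTree(tree)" dumps a single tree using the
 * thread-local compiler, while "cTree(comp, tree)" does the same when a Compiler* is at hand.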
*/ void cBlock(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Block %u\n", sequenceNumber++); comp->fgTableDispBasicBlock(block); } void cBlocks(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Blocks %u\n", sequenceNumber++); comp->fgDispBasicBlocks(); } void cBlocksV(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlocksV %u\n", sequenceNumber++); comp->fgDispBasicBlocks(true); } void cStmt(Compiler* comp, Statement* statement) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Stmt %u\n", sequenceNumber++); comp->gtDispStmt(statement, ">>>"); } void cTree(Compiler* comp, GenTree* tree) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Tree %u\n", sequenceNumber++); comp->gtDispTree(tree, nullptr, ">>>"); } void cTreeLIR(Compiler* comp, GenTree* tree) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *TreeLIR %u\n", sequenceNumber++); comp->gtDispLIRNode(tree); } void cTrees(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Trees %u\n", sequenceNumber++); comp->fgDumpTrees(comp->fgFirstBB, nullptr); } void cEH(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *EH %u\n", sequenceNumber++); comp->fgDispHandlerTab(); } void cVar(Compiler* comp, unsigned lclNum) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Var %u\n", sequenceNumber++); comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); } void cVarDsc(Compiler* comp, LclVarDsc* varDsc) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *VarDsc %u\n", sequenceNumber++); unsigned lclNum = comp->lvaGetLclNum(varDsc); comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); } void cVars(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Vars %u\n", sequenceNumber++); comp->lvaTableDump(); } void cVarsFinal(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called 
printf("===================================================================== *Vars %u\n", sequenceNumber++); comp->lvaTableDump(Compiler::FINAL_FRAME_LAYOUT); } void cBlockCheapPreds(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockCheapPreds %u\n", sequenceNumber++); block->dspCheapPreds(); } void cBlockPreds(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockPreds %u\n", sequenceNumber++); block->dspPreds(); } void cBlockSuccs(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockSuccs %u\n", sequenceNumber++); block->dspSuccs(comp); } void cReach(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Reach %u\n", sequenceNumber++); comp->fgDispReach(); } void cDoms(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Doms %u\n", sequenceNumber++); comp->fgDispDoms(); } void cLiveness(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Liveness %u\n", sequenceNumber++); comp->fgDispBBLiveness(); } void cCVarSet(Compiler* comp, VARSET_VALARG_TP vars) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *CVarSet %u\n", sequenceNumber++); dumpConvertedVarSet(comp, vars); printf("\n"); // dumpConvertedVarSet() doesn't emit a trailing newline } void cLoop(Compiler* comp, unsigned loopNum) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Loop %u\n", sequenceNumber++); comp->optPrintLoopInfo(loopNum, /* verbose */ true); printf("\n"); } void cLoopPtr(Compiler* comp, const Compiler::LoopDsc* loop) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *LoopPtr %u\n", sequenceNumber++); comp->optPrintLoopInfo(loop, /* verbose */ true); printf("\n"); } void cLoops(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Loops %u\n", sequenceNumber++); comp->optPrintLoopTable(); } void dBlock(BasicBlock* block) { cBlock(JitTls::GetCompiler(), block); } void dBlocks() { cBlocks(JitTls::GetCompiler()); } void dBlocksV() { cBlocksV(JitTls::GetCompiler()); } void dStmt(Statement* statement) { cStmt(JitTls::GetCompiler(), statement); } void dTree(GenTree* tree) { 
cTree(JitTls::GetCompiler(), tree); } void dTreeLIR(GenTree* tree) { cTreeLIR(JitTls::GetCompiler(), tree); } void dTreeRange(GenTree* first, GenTree* last) { Compiler* comp = JitTls::GetCompiler(); GenTree* cur = first; while (true) { cTreeLIR(comp, cur); if (cur == last) break; cur = cur->gtNext; } } void dTrees() { cTrees(JitTls::GetCompiler()); } void dEH() { cEH(JitTls::GetCompiler()); } void dVar(unsigned lclNum) { cVar(JitTls::GetCompiler(), lclNum); } void dVarDsc(LclVarDsc* varDsc) { cVarDsc(JitTls::GetCompiler(), varDsc); } void dVars() { cVars(JitTls::GetCompiler()); } void dVarsFinal() { cVarsFinal(JitTls::GetCompiler()); } void dBlockPreds(BasicBlock* block) { cBlockPreds(JitTls::GetCompiler(), block); } void dBlockCheapPreds(BasicBlock* block) { cBlockCheapPreds(JitTls::GetCompiler(), block); } void dBlockSuccs(BasicBlock* block) { cBlockSuccs(JitTls::GetCompiler(), block); } void dReach() { cReach(JitTls::GetCompiler()); } void dDoms() { cDoms(JitTls::GetCompiler()); } void dLiveness() { cLiveness(JitTls::GetCompiler()); } void dCVarSet(VARSET_VALARG_TP vars) { cCVarSet(JitTls::GetCompiler(), vars); } void dLoop(unsigned loopNum) { cLoop(JitTls::GetCompiler(), loopNum); } void dLoopPtr(const Compiler::LoopDsc* loop) { cLoopPtr(JitTls::GetCompiler(), loop); } void dLoops() { cLoops(JitTls::GetCompiler()); } void dRegMask(regMaskTP mask) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== dRegMask %u\n", sequenceNumber++); dspRegMask(mask); printf("\n"); // dspRegMask() doesn't emit a trailing newline } void dBlockList(BasicBlockList* list) { printf("WorkList: "); while (list != nullptr) { printf(FMT_BB " ", list->block->bbNum); list = list->next; } printf("\n"); } // Global variables available in debug mode. That are set by debug APIs for finding // Trees, Stmts, and/or Blocks using id or bbNum. // That can be used in watch window or as a way to get address of fields for data break points. GenTree* dbTree; Statement* dbStmt; BasicBlock* dbTreeBlock; BasicBlock* dbBlock; // Debug APIs for finding Trees, Stmts, and/or Blocks. // As a side effect, they set the debug variables above. GenTree* dFindTree(GenTree* tree, unsigned id) { if (tree == nullptr) { return nullptr; } if (tree->gtTreeID == id) { dbTree = tree; return tree; } GenTree* child = nullptr; tree->VisitOperands([&child, id](GenTree* operand) -> GenTree::VisitResult { child = dFindTree(child, id); return (child != nullptr) ? 
GenTree::VisitResult::Abort : GenTree::VisitResult::Continue; }); return child; } GenTree* dFindTree(unsigned id) { Compiler* comp = JitTls::GetCompiler(); GenTree* tree; dbTreeBlock = nullptr; dbTree = nullptr; for (BasicBlock* const block : comp->Blocks()) { for (Statement* const stmt : block->Statements()) { tree = dFindTree(stmt->GetRootNode(), id); if (tree != nullptr) { dbTreeBlock = block; return tree; } } } return nullptr; } Statement* dFindStmt(unsigned id) { Compiler* comp = JitTls::GetCompiler(); dbStmt = nullptr; unsigned stmtId = 0; for (BasicBlock* const block : comp->Blocks()) { for (Statement* const stmt : block->Statements()) { stmtId++; if (stmtId == id) { dbStmt = stmt; return stmt; } } } return nullptr; } BasicBlock* dFindBlock(unsigned bbNum) { Compiler* comp = JitTls::GetCompiler(); BasicBlock* block = nullptr; dbBlock = nullptr; for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext) { if (block->bbNum == bbNum) { dbBlock = block; break; } } return block; } Compiler::LoopDsc* dFindLoop(unsigned loopNum) { Compiler* comp = JitTls::GetCompiler(); if (loopNum >= comp->optLoopCount) { printf("loopNum %u out of range\n"); return nullptr; } return &comp->optLoopTable[loopNum]; } void cTreeFlags(Compiler* comp, GenTree* tree) { int chars = 0; if (tree->gtFlags != 0) { chars += printf("flags="); // Node flags CLANG_FORMAT_COMMENT_ANCHOR; #if defined(DEBUG) if (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) { chars += printf("[NODE_LARGE]"); } if (tree->gtDebugFlags & GTF_DEBUG_NODE_SMALL) { chars += printf("[NODE_SMALL]"); } if (tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) { chars += printf("[MORPHED]"); } #endif // defined(DEBUG) if (tree->gtFlags & GTF_COLON_COND) { chars += printf("[COLON_COND]"); } // Operator flags genTreeOps op = tree->OperGet(); switch (op) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: case GT_LCL_FLD: case GT_LCL_FLD_ADDR: case GT_STORE_LCL_FLD: case GT_STORE_LCL_VAR: if (tree->gtFlags & GTF_VAR_DEF) { chars += printf("[VAR_DEF]"); } if (tree->gtFlags & GTF_VAR_USEASG) { chars += printf("[VAR_USEASG]"); } if (tree->gtFlags & GTF_VAR_CAST) { chars += printf("[VAR_CAST]"); } if (tree->gtFlags & GTF_VAR_ITERATOR) { chars += printf("[VAR_ITERATOR]"); } if (tree->gtFlags & GTF_VAR_CLONED) { chars += printf("[VAR_CLONED]"); } if (tree->gtFlags & GTF_VAR_DEATH) { chars += printf("[VAR_DEATH]"); } if (tree->gtFlags & GTF_VAR_ARR_INDEX) { chars += printf("[VAR_ARR_INDEX]"); } #if defined(DEBUG) if (tree->gtDebugFlags & GTF_DEBUG_VAR_CSE_REF) { chars += printf("[VAR_CSE_REF]"); } #endif break; case GT_NO_OP: break; case GT_FIELD: if (tree->gtFlags & GTF_FLD_VOLATILE) { chars += printf("[FLD_VOLATILE]"); } break; case GT_INDEX: if (tree->gtFlags & GTF_INX_STRING_LAYOUT) { chars += printf("[INX_STRING_LAYOUT]"); } FALLTHROUGH; case GT_INDEX_ADDR: if (tree->gtFlags & GTF_INX_RNGCHK) { chars += printf("[INX_RNGCHK]"); } break; case GT_IND: case GT_STOREIND: if (tree->gtFlags & GTF_IND_VOLATILE) { chars += printf("[IND_VOLATILE]"); } if (tree->gtFlags & GTF_IND_TGTANYWHERE) { chars += printf("[IND_TGTANYWHERE]"); } if (tree->gtFlags & GTF_IND_TGT_NOT_HEAP) { chars += printf("[IND_TGT_NOT_HEAP]"); } if (tree->gtFlags & GTF_IND_TLS_REF) { chars += printf("[IND_TLS_REF]"); } if (tree->gtFlags & GTF_IND_ASG_LHS) { chars += printf("[IND_ASG_LHS]"); } if (tree->gtFlags & GTF_IND_UNALIGNED) { chars += printf("[IND_UNALIGNED]"); } if (tree->gtFlags & GTF_IND_INVARIANT) { chars += printf("[IND_INVARIANT]"); } if (tree->gtFlags & GTF_IND_NONNULL) { chars += 
printf("[IND_NONNULL]"); } break; case GT_CLS_VAR: if (tree->gtFlags & GTF_CLS_VAR_ASG_LHS) { chars += printf("[CLS_VAR_ASG_LHS]"); } break; case GT_MUL: #if !defined(TARGET_64BIT) case GT_MUL_LONG: #endif if (tree->gtFlags & GTF_MUL_64RSLT) { chars += printf("[64RSLT]"); } if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_ADD: if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_LSH: if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_MOD: case GT_UMOD: break; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GT: case GT_GE: if (tree->gtFlags & GTF_RELOP_NAN_UN) { chars += printf("[RELOP_NAN_UN]"); } if (tree->gtFlags & GTF_RELOP_JMP_USED) { chars += printf("[RELOP_JMP_USED]"); } break; case GT_QMARK: if (tree->gtFlags & GTF_QMARK_CAST_INSTOF) { chars += printf("[QMARK_CAST_INSTOF]"); } break; case GT_BOX: if (tree->gtFlags & GTF_BOX_VALUE) { chars += printf("[BOX_VALUE]"); } break; case GT_CNS_INT: { GenTreeFlags handleKind = (tree->gtFlags & GTF_ICON_HDL_MASK); switch (handleKind) { case GTF_ICON_SCOPE_HDL: chars += printf("[ICON_SCOPE_HDL]"); break; case GTF_ICON_CLASS_HDL: chars += printf("[ICON_CLASS_HDL]"); break; case GTF_ICON_METHOD_HDL: chars += printf("[ICON_METHOD_HDL]"); break; case GTF_ICON_FIELD_HDL: chars += printf("[ICON_FIELD_HDL]"); break; case GTF_ICON_STATIC_HDL: chars += printf("[ICON_STATIC_HDL]"); break; case GTF_ICON_STR_HDL: chars += printf("[ICON_STR_HDL]"); break; case GTF_ICON_CONST_PTR: chars += printf("[ICON_CONST_PTR]"); break; case GTF_ICON_GLOBAL_PTR: chars += printf("[ICON_GLOBAL_PTR]"); break; case GTF_ICON_VARG_HDL: chars += printf("[ICON_VARG_HDL]"); break; case GTF_ICON_PINVKI_HDL: chars += printf("[ICON_PINVKI_HDL]"); break; case GTF_ICON_TOKEN_HDL: chars += printf("[ICON_TOKEN_HDL]"); break; case GTF_ICON_TLS_HDL: chars += printf("[ICON_TLD_HDL]"); break; case GTF_ICON_FTN_ADDR: chars += printf("[ICON_FTN_ADDR]"); break; case GTF_ICON_CIDMID_HDL: chars += printf("[ICON_CIDMID_HDL]"); break; case GTF_ICON_BBC_PTR: chars += printf("[ICON_BBC_PTR]"); break; case GTF_ICON_STATIC_BOX_PTR: chars += printf("[GTF_ICON_STATIC_BOX_PTR]"); break; case GTF_ICON_FIELD_OFF: chars += printf("[ICON_FIELD_OFF]"); break; default: assert(!"a forgotten handle flag"); break; } } break; case GT_OBJ: case GT_STORE_OBJ: if (tree->AsObj()->GetLayout()->HasGCPtr()) { chars += printf("[BLK_HASGCPTR]"); } FALLTHROUGH; case GT_BLK: case GT_STORE_BLK: case GT_STORE_DYN_BLK: if (tree->gtFlags & GTF_BLK_VOLATILE) { chars += printf("[BLK_VOLATILE]"); } if (tree->AsBlk()->IsUnaligned()) { chars += printf("[BLK_UNALIGNED]"); } break; case GT_CALL: if (tree->gtFlags & GTF_CALL_UNMANAGED) { chars += printf("[CALL_UNMANAGED]"); } if (tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) { chars += printf("[CALL_INLINE_CANDIDATE]"); } if (!tree->AsCall()->IsVirtual()) { chars += printf("[CALL_NONVIRT]"); } if (tree->AsCall()->IsVirtualVtable()) { chars += printf("[CALL_VIRT_VTABLE]"); } if (tree->AsCall()->IsVirtualStub()) { chars += printf("[CALL_VIRT_STUB]"); } if (tree->gtFlags & GTF_CALL_NULLCHECK) { chars += printf("[CALL_NULLCHECK]"); } if (tree->gtFlags & GTF_CALL_POP_ARGS) { chars += printf("[CALL_POP_ARGS]"); } if (tree->gtFlags & GTF_CALL_HOISTABLE) { chars += printf("[CALL_HOISTABLE]"); } // More flags associated with calls. 
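            // The GTF_CALL_M_* bits live in GenTreeCall::gtCallMoreFlags rather than in gtFlags,
            // so the node is viewed as a GenTreeCall before they are inspected.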
{ GenTreeCall* call = tree->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) { chars += printf("[CALL_M_EXPLICIT_TAILCALL]"); } if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL) { chars += printf("[CALL_M_TAILCALL]"); } if (call->gtCallMoreFlags & GTF_CALL_M_VARARGS) { chars += printf("[CALL_M_VARARGS]"); } if (call->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) { chars += printf("[CALL_M_RETBUFFARG]"); } if (call->gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) { chars += printf("[CALL_M_DELEGATE_INV]"); } if (call->gtCallMoreFlags & GTF_CALL_M_NOGCCHECK) { chars += printf("[CALL_M_NOGCCHECK]"); } if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { chars += printf("[CALL_M_SPECIAL_INTRINSIC]"); } if (call->IsUnmanaged()) { if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { chars += printf("[CALL_M_UNMGD_THISCALL]"); } } else if (call->IsVirtualStub()) { if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) { chars += printf("[CALL_M_VIRTSTUB_REL_INDIRECT]"); } } else if (!call->IsVirtual()) { if (call->gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) { chars += printf("[CALL_M_NONVIRT_SAME_THIS]"); } } if (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH) { chars += printf("[CALL_M_FRAME_VAR_DEATH]"); } if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER) { chars += printf("[CALL_M_TAILCALL_VIA_JIT_HELPER]"); } #if FEATURE_TAILCALL_OPT if (call->gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) { chars += printf("[CALL_M_IMPLICIT_TAILCALL]"); } #endif if (call->gtCallMoreFlags & GTF_CALL_M_PINVOKE) { chars += printf("[CALL_M_PINVOKE]"); } if (call->IsFatPointerCandidate()) { chars += printf("[CALL_FAT_POINTER_CANDIDATE]"); } if (call->IsGuarded()) { chars += printf("[CALL_GUARDED]"); } if (call->IsExpRuntimeLookup()) { chars += printf("[CALL_EXP_RUNTIME_LOOKUP]"); } } break; default: { GenTreeFlags flags = (tree->gtFlags & (~(GTF_COMMON_MASK | GTF_OVERFLOW))); if (flags != 0) { chars += printf("[%08X]", flags); } } break; } // Common flags. 
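        // Unlike the per-operator flags handled above, the bits below (GTF_ASG, GTF_CALL,
        // GTF_EXCEPT, GTF_GLOB_REF, and the ordering/spill/CSE hints) are meaningful across
        // most operators.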
if (tree->gtFlags & GTF_ASG) { chars += printf("[ASG]"); } if (tree->gtFlags & GTF_CALL) { chars += printf("[CALL]"); } switch (op) { case GT_MUL: case GT_CAST: case GT_ADD: case GT_SUB: if (tree->gtFlags & GTF_OVERFLOW) { chars += printf("[OVERFLOW]"); } break; default: break; } if (tree->gtFlags & GTF_EXCEPT) { chars += printf("[EXCEPT]"); } if (tree->gtFlags & GTF_GLOB_REF) { chars += printf("[GLOB_REF]"); } if (tree->gtFlags & GTF_ORDER_SIDEEFF) { chars += printf("[ORDER_SIDEEFF]"); } if (tree->gtFlags & GTF_REVERSE_OPS) { if (op != GT_LCL_VAR) { chars += printf("[REVERSE_OPS]"); } } if (tree->gtFlags & GTF_SPILLED) { chars += printf("[SPILLED_OPER]"); } #if FEATURE_SET_FLAGS if (tree->gtFlags & GTF_SET_FLAGS) { if ((op != GT_IND) && (op != GT_STOREIND)) { chars += printf("[ZSF_SET_FLAGS]"); } } #endif if (tree->gtFlags & GTF_IND_NONFAULTING) { if (tree->OperIsIndirOrArrLength()) { chars += printf("[IND_NONFAULTING]"); } } if (tree->gtFlags & GTF_MAKE_CSE) { chars += printf("[MAKE_CSE]"); } if (tree->gtFlags & GTF_DONT_CSE) { chars += printf("[DONT_CSE]"); } if (tree->gtFlags & GTF_BOOLEAN) { chars += printf("[BOOLEAN]"); } if (tree->gtFlags & GTF_UNSIGNED) { chars += printf("[SMALL_UNSIGNED]"); } if (tree->gtFlags & GTF_LATE_ARG) { chars += printf("[SMALL_LATE_ARG]"); } if (tree->gtFlags & GTF_SPILL) { chars += printf("[SPILL]"); } if (tree->gtFlags & GTF_REUSE_REG_VAL) { if (op == GT_CNS_INT) { chars += printf("[REUSE_REG_VAL]"); } } } } void dTreeFlags(GenTree* tree) { cTreeFlags(JitTls::GetCompiler(), tree); } #endif // DEBUG #if VARSET_COUNTOPS // static BitSetSupport::BitSetOpCounter Compiler::m_varsetOpCounter("VarSetOpCounts.log"); #endif #if ALLVARSET_COUNTOPS // static BitSetSupport::BitSetOpCounter Compiler::m_allvarsetOpCounter("AllVarSetOpCounts.log"); #endif // static HelperCallProperties Compiler::s_helperCallProperties; /*****************************************************************************/ /*****************************************************************************/ //------------------------------------------------------------------------ // killGCRefs: // Given some tree node return does it need all GC refs to be spilled from // callee save registers. // // Arguments: // tree - the tree for which we ask about gc refs. // // Return Value: // true - tree kills GC refs on callee save registers // false - tree doesn't affect GC refs on callee save registers bool Compiler::killGCRefs(GenTree* tree) { if (tree->IsCall()) { GenTreeCall* call = tree->AsCall(); if (call->IsUnmanaged()) { return true; } if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_JIT_PINVOKE_BEGIN)) { assert(opts.ShouldUsePInvokeHelpers()); return true; } } else if (tree->OperIs(GT_START_PREEMPTGC)) { return true; } return false; } //------------------------------------------------------------------------ // lvaIsOSRLocal: check if this local var is one that requires special // treatment for OSR compilations. 
// // Arguments: // varNum - variable of interest // // Return Value: // true - this is an OSR compile and this local requires special treatment // false - not an OSR compile, or not an interesting local for OSR bool Compiler::lvaIsOSRLocal(unsigned varNum) { if (!opts.IsOSR()) { return false; } if (varNum < info.compLocalsCount) { return true; } LclVarDsc* varDsc = lvaGetDesc(varNum); if (varDsc->lvIsStructField) { return (varDsc->lvParentLcl < info.compLocalsCount); } return false; } //------------------------------------------------------------------------------ // gtTypeForNullCheck: helper to get the most optimal and correct type for nullcheck // // Arguments: // tree - the node for nullcheck; // var_types Compiler::gtTypeForNullCheck(GenTree* tree) { if (varTypeIsIntegral(tree)) { #if defined(TARGET_XARCH) // Just an optimization for XARCH - smaller mov if (varTypeIsLong(tree)) { return TYP_INT; } #endif return tree->TypeGet(); } // for the rest: probe a single byte to avoid potential AVEs return TYP_BYTE; } //------------------------------------------------------------------------------ // gtChangeOperToNullCheck: helper to change tree oper to a NULLCHECK. // // Arguments: // tree - the node to change; // basicBlock - basic block of the node. // // Notes: // the function should not be called after lowering for platforms that do not support // emitting NULLCHECK nodes, like arm32. Use `Lowering::TransformUnusedIndirection` // that handles it and calls this function when appropriate. // void Compiler::gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block) { assert(tree->OperIs(GT_FIELD, GT_IND, GT_OBJ, GT_BLK)); tree->ChangeOper(GT_NULLCHECK); tree->ChangeType(gtTypeForNullCheck(tree)); block->bbFlags |= BBF_HAS_NULLCHECK; optMethodFlags |= OMF_HAS_NULLCHECK; } #if defined(DEBUG) //------------------------------------------------------------------------------ // devirtualizationDetailToString: describe the detailed devirtualization reason // // Arguments: // detail - detail to describe // // Returns: // descriptive string // const char* Compiler::devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail) { switch (detail) { case CORINFO_DEVIRTUALIZATION_UNKNOWN: return "unknown"; case CORINFO_DEVIRTUALIZATION_SUCCESS: return "success"; case CORINFO_DEVIRTUALIZATION_FAILED_CANON: return "object class was canonical"; case CORINFO_DEVIRTUALIZATION_FAILED_COM: return "object class was com"; case CORINFO_DEVIRTUALIZATION_FAILED_CAST: return "object class could not be cast to interface class"; case CORINFO_DEVIRTUALIZATION_FAILED_LOOKUP: return "interface method could not be found"; case CORINFO_DEVIRTUALIZATION_FAILED_DIM: return "interface method was default interface method"; case CORINFO_DEVIRTUALIZATION_FAILED_SUBCLASS: return "object not subclass of base class"; case CORINFO_DEVIRTUALIZATION_FAILED_SLOT: return "virtual method installed via explicit override"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE: return "devirtualization crossed version bubble"; case CORINFO_DEVIRTUALIZATION_MULTIPLE_IMPL: return "object class has multiple implementations of interface"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_CLASS_DECL: return "decl method is defined on class and decl method not in version bubble, and decl method not in " "type closest to version bubble"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_INTERFACE_DECL: return "decl method is defined on interface and not in version bubble, and implementation type not " "entirely defined in bubble"; case 
CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL: return "object class not defined within version bubble"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL_NOT_REFERENCEABLE: return "object class cannot be referenced from R2R code due to missing tokens"; case CORINFO_DEVIRTUALIZATION_FAILED_DUPLICATE_INTERFACE: return "crossgen2 virtual method algorithm and runtime algorithm differ in the presence of duplicate " "interface implementations"; case CORINFO_DEVIRTUALIZATION_FAILED_DECL_NOT_REPRESENTABLE: return "Decl method cannot be represented in R2R image"; default: return "undefined"; } } #endif // defined(DEBUG) #if TRACK_ENREG_STATS Compiler::EnregisterStats Compiler::s_enregisterStats; void Compiler::EnregisterStats::RecordLocal(const LclVarDsc* varDsc) { m_totalNumberOfVars++; if (varDsc->TypeGet() == TYP_STRUCT) { m_totalNumberOfStructVars++; } if (!varDsc->lvDoNotEnregister) { m_totalNumberOfEnregVars++; if (varDsc->TypeGet() == TYP_STRUCT) { m_totalNumberOfStructEnregVars++; } } else { switch (varDsc->GetDoNotEnregReason()) { case DoNotEnregisterReason::AddrExposed: m_addrExposed++; break; case DoNotEnregisterReason::DontEnregStructs: m_dontEnregStructs++; break; case DoNotEnregisterReason::NotRegSizeStruct: m_notRegSizeStruct++; break; case DoNotEnregisterReason::LocalField: m_localField++; break; case DoNotEnregisterReason::VMNeedsStackAddr: m_VMNeedsStackAddr++; break; case DoNotEnregisterReason::LiveInOutOfHandler: m_liveInOutHndlr++; break; case DoNotEnregisterReason::BlockOp: m_blockOp++; break; case DoNotEnregisterReason::IsStructArg: m_structArg++; break; case DoNotEnregisterReason::DepField: m_depField++; break; case DoNotEnregisterReason::NoRegVars: m_noRegVars++; break; case DoNotEnregisterReason::MinOptsGC: m_minOptsGC++; break; #if !defined(TARGET_64BIT) case DoNotEnregisterReason::LongParamField: m_longParamField++; break; #endif #ifdef JIT32_GCENCODER case DoNotEnregisterReason::PinningRef: m_PinningRef++; break; #endif case DoNotEnregisterReason::LclAddrNode: m_lclAddrNode++; break; case DoNotEnregisterReason::CastTakesAddr: m_castTakesAddr++; break; case DoNotEnregisterReason::StoreBlkSrc: m_storeBlkSrc++; break; case DoNotEnregisterReason::OneAsgRetyping: m_oneAsgRetyping++; break; case DoNotEnregisterReason::SwizzleArg: m_swizzleArg++; break; case DoNotEnregisterReason::BlockOpRet: m_blockOpRet++; break; case DoNotEnregisterReason::ReturnSpCheck: m_returnSpCheck++; break; case DoNotEnregisterReason::SimdUserForcesDep: m_simdUserForcesDep++; break; default: unreached(); break; } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::AddrExposed) { // We can't `assert(IsAddressExposed())` because `fgAdjustForAddressExposedOrWrittenThis` // does not clear `m_doNotEnregReason` on `this`. 
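            // Attribute the exposure to a specific cause so the aggregate report can break
            // m_addrExposed down by AddressExposedReason.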
switch (varDsc->GetAddrExposedReason()) { case AddressExposedReason::PARENT_EXPOSED: m_parentExposed++; break; case AddressExposedReason::TOO_CONSERVATIVE: m_tooConservative++; break; case AddressExposedReason::ESCAPE_ADDRESS: m_escapeAddress++; break; case AddressExposedReason::WIDE_INDIR: m_wideIndir++; break; case AddressExposedReason::OSR_EXPOSED: m_osrExposed++; break; case AddressExposedReason::STRESS_LCL_FLD: m_stressLclFld++; break; case AddressExposedReason::COPY_FLD_BY_FLD: m_copyFldByFld++; break; case AddressExposedReason::DISPATCH_RET_BUF: m_dispatchRetBuf++; break; default: unreached(); break; } } } } void Compiler::EnregisterStats::Dump(FILE* fout) const { const unsigned totalNumberOfNotStructVars = s_enregisterStats.m_totalNumberOfVars - s_enregisterStats.m_totalNumberOfStructVars; const unsigned totalNumberOfNotStructEnregVars = s_enregisterStats.m_totalNumberOfEnregVars - s_enregisterStats.m_totalNumberOfStructEnregVars; const unsigned notEnreg = s_enregisterStats.m_totalNumberOfVars - s_enregisterStats.m_totalNumberOfEnregVars; fprintf(fout, "\nLocals enregistration statistics:\n"); if (m_totalNumberOfVars == 0) { fprintf(fout, "No locals to report.\n"); return; } fprintf(fout, "total number of locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", m_totalNumberOfVars, m_totalNumberOfEnregVars, m_totalNumberOfVars - m_totalNumberOfEnregVars, (float)m_totalNumberOfEnregVars / m_totalNumberOfVars); if (m_totalNumberOfStructVars != 0) { fprintf(fout, "total number of struct locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", m_totalNumberOfStructVars, m_totalNumberOfStructEnregVars, m_totalNumberOfStructVars - m_totalNumberOfStructEnregVars, (float)m_totalNumberOfStructEnregVars / m_totalNumberOfStructVars); } const unsigned numberOfPrimitiveLocals = totalNumberOfNotStructVars - totalNumberOfNotStructEnregVars; if (numberOfPrimitiveLocals != 0) { fprintf(fout, "total number of primitive locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", totalNumberOfNotStructVars, totalNumberOfNotStructEnregVars, numberOfPrimitiveLocals, (float)totalNumberOfNotStructEnregVars / totalNumberOfNotStructVars); } if (notEnreg == 0) { fprintf(fout, "All locals are enregistered.\n"); return; } #define PRINT_STATS(stat, total) \ if (stat != 0) \ { \ fprintf(fout, #stat " %d, ratio: %.2f\n", stat, (float)stat / total); \ } PRINT_STATS(m_addrExposed, notEnreg); PRINT_STATS(m_dontEnregStructs, notEnreg); PRINT_STATS(m_notRegSizeStruct, notEnreg); PRINT_STATS(m_localField, notEnreg); PRINT_STATS(m_VMNeedsStackAddr, notEnreg); PRINT_STATS(m_liveInOutHndlr, notEnreg); PRINT_STATS(m_blockOp, notEnreg); PRINT_STATS(m_structArg, notEnreg); PRINT_STATS(m_depField, notEnreg); PRINT_STATS(m_noRegVars, notEnreg); PRINT_STATS(m_minOptsGC, notEnreg); #if !defined(TARGET_64BIT) PRINT_STATS(m_longParamField, notEnreg); #endif // !TARGET_64BIT #ifdef JIT32_GCENCODER PRINT_STATS(m_PinningRef, notEnreg); #endif // JIT32_GCENCODER PRINT_STATS(m_lclAddrNode, notEnreg); PRINT_STATS(m_castTakesAddr, notEnreg); PRINT_STATS(m_storeBlkSrc, notEnreg); PRINT_STATS(m_oneAsgRetyping, notEnreg); PRINT_STATS(m_swizzleArg, notEnreg); PRINT_STATS(m_blockOpRet, notEnreg); PRINT_STATS(m_returnSpCheck, notEnreg); PRINT_STATS(m_simdUserForcesDep, notEnreg); fprintf(fout, "\nAddr exposed details:\n"); if (m_addrExposed == 0) { fprintf(fout, "\nNo address exposed locals to report.\n"); return; } PRINT_STATS(m_parentExposed, m_addrExposed); PRINT_STATS(m_tooConservative, m_addrExposed); 
    PRINT_STATS(m_escapeAddress, m_addrExposed);
    PRINT_STATS(m_wideIndir, m_addrExposed);
    PRINT_STATS(m_osrExposed, m_addrExposed);
    PRINT_STATS(m_stressLclFld, m_addrExposed);
    PRINT_STATS(m_copyFldByFld, m_addrExposed);
    PRINT_STATS(m_dispatchRetBuf, m_addrExposed);
}
#endif // TRACK_ENREG_STATS
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif // _MSC_VER #include "hostallocator.h" #include "emit.h" #include "ssabuilder.h" #include "valuenum.h" #include "rangecheck.h" #include "lower.h" #include "stacklevelsetter.h" #include "jittelemetry.h" #include "patchpointinfo.h" #include "jitstd/algorithm.h" extern ICorJitHost* g_jitHost; #if defined(DEBUG) // Column settings for COMPlus_JitDumpIR. We could(should) make these programmable. #define COLUMN_OPCODE 30 #define COLUMN_OPERANDS (COLUMN_OPCODE + 25) #define COLUMN_KINDS 110 #define COLUMN_FLAGS (COLUMN_KINDS + 32) #endif #if defined(DEBUG) unsigned Compiler::jitTotalMethodCompiled = 0; #endif // defined(DEBUG) #if defined(DEBUG) LONG Compiler::jitNestingLevel = 0; #endif // defined(DEBUG) // static bool Compiler::s_pAltJitExcludeAssembliesListInitialized = false; AssemblyNamesList2* Compiler::s_pAltJitExcludeAssembliesList = nullptr; #ifdef DEBUG // static bool Compiler::s_pJitDisasmIncludeAssembliesListInitialized = false; AssemblyNamesList2* Compiler::s_pJitDisasmIncludeAssembliesList = nullptr; // static bool Compiler::s_pJitFunctionFileInitialized = false; MethodSet* Compiler::s_pJitMethodSet = nullptr; #endif // DEBUG #ifdef CONFIGURABLE_ARM_ABI // static bool GlobalJitOptions::compFeatureHfa = false; LONG GlobalJitOptions::compUseSoftFPConfigured = 0; #endif // CONFIGURABLE_ARM_ABI /***************************************************************************** * * Little helpers to grab the current cycle counter value; this is done * differently based on target architecture, host toolchain, etc. The * main thing is to keep the overhead absolutely minimal; in fact, on * x86/x64 we use RDTSC even though it's not thread-safe; GetThreadCycles * (which is monotonous) is just too expensive. */ #ifdef FEATURE_JIT_METHOD_PERF #if defined(HOST_X86) || defined(HOST_AMD64) #if defined(_MSC_VER) #include <intrin.h> inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) { *cycleOut = __rdtsc(); return true; } #elif defined(__GNUC__) inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) { uint32_t hi, lo; __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi)); *cycleOut = (static_cast<unsigned __int64>(hi) << 32) | static_cast<unsigned __int64>(lo); return true; } #else // neither _MSC_VER nor __GNUC__ // The following *might* work - might as well try. #define _our_GetThreadCycles(cp) GetThreadCycles(cp) #endif #elif defined(HOST_ARM) || defined(HOST_ARM64) // If this doesn't work please see ../gc/gc.cpp for additional ARM // info (and possible solutions). #define _our_GetThreadCycles(cp) GetThreadCycles(cp) #else // not x86/x64 and not ARM // Don't know what this target is, but let's give it a try; if // someone really wants to make this work, please add the right // code here. 
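// This falls back to the OS-provided GetThreadCycles; it is slower than raw RDTSC but keeps
// FEATURE_JIT_METHOD_PERF usable on unrecognized hosts.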
#define _our_GetThreadCycles(cp) GetThreadCycles(cp) #endif // which host OS const BYTE genTypeSizes[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) sz, #include "typelist.h" #undef DEF_TP }; const BYTE genTypeAlignments[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) al, #include "typelist.h" #undef DEF_TP }; const BYTE genTypeStSzs[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) st, #include "typelist.h" #undef DEF_TP }; const BYTE genActualTypes[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) jitType, #include "typelist.h" #undef DEF_TP }; #endif // FEATURE_JIT_METHOD_PERF /*****************************************************************************/ inline unsigned getCurTime() { SYSTEMTIME tim; GetSystemTime(&tim); return (((tim.wHour * 60) + tim.wMinute) * 60 + tim.wSecond) * 1000 + tim.wMilliseconds; } /*****************************************************************************/ #ifdef DEBUG /*****************************************************************************/ static FILE* jitSrcFilePtr; static unsigned jitCurSrcLine; void Compiler::JitLogEE(unsigned level, const char* fmt, ...) { va_list args; if (verbose) { va_start(args, fmt); vflogf(jitstdout, fmt, args); va_end(args); } va_start(args, fmt); vlogf(level, fmt, args); va_end(args); } #endif // DEBUG /*****************************************************************************/ #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS static unsigned genMethodCnt; // total number of methods JIT'ted unsigned genMethodICnt; // number of interruptible methods unsigned genMethodNCnt; // number of non-interruptible methods static unsigned genSmallMethodsNeedingExtraMemoryCnt = 0; #endif /*****************************************************************************/ #if MEASURE_NODE_SIZE NodeSizeStats genNodeSizeStats; NodeSizeStats genNodeSizeStatsPerFunc; unsigned genTreeNcntHistBuckets[] = {10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000, 5000, 10000, 0}; Histogram genTreeNcntHist(genTreeNcntHistBuckets); unsigned genTreeNsizHistBuckets[] = {1000, 5000, 10000, 50000, 100000, 500000, 1000000, 0}; Histogram genTreeNsizHist(genTreeNsizHistBuckets); #endif // MEASURE_NODE_SIZE /*****************************************************************************/ #if MEASURE_MEM_ALLOC unsigned memAllocHistBuckets[] = {64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; Histogram memAllocHist(memAllocHistBuckets); unsigned memUsedHistBuckets[] = {16, 32, 64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; Histogram memUsedHist(memUsedHistBuckets); #endif // MEASURE_MEM_ALLOC /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES size_t grossVMsize; // Total IL code size size_t grossNCsize; // Native code + data size size_t totalNCsize; // Native code + data + GC info size (TODO-Cleanup: GC info size only accurate for JIT32_GCENCODER) size_t gcHeaderISize; // GC header size: interruptible methods size_t gcPtrMapISize; // GC pointer map size: interruptible methods size_t gcHeaderNSize; // GC header size: non-interruptible methods size_t gcPtrMapNSize; // GC pointer map size: non-interruptible methods #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of argument counts. 
*/ #if CALL_ARG_STATS unsigned argTotalCalls; unsigned argHelperCalls; unsigned argStaticCalls; unsigned argNonVirtualCalls; unsigned argVirtualCalls; unsigned argTotalArgs; // total number of args for all calls (including objectPtr) unsigned argTotalDWordArgs; unsigned argTotalLongArgs; unsigned argTotalFloatArgs; unsigned argTotalDoubleArgs; unsigned argTotalRegArgs; unsigned argTotalTemps; unsigned argTotalLclVar; unsigned argTotalDeferred; unsigned argTotalConst; unsigned argTotalObjPtr; unsigned argTotalGTF_ASGinArgs; unsigned argMaxTempsPerMethod; unsigned argCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argCntTable(argCntBuckets); unsigned argDWordCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argDWordCntTable(argDWordCntBuckets); unsigned argDWordLngCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argDWordLngCntTable(argDWordLngCntBuckets); unsigned argTempsCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; Histogram argTempsCntTable(argTempsCntBuckets); #endif // CALL_ARG_STATS /***************************************************************************** * * Variables to keep track of basic block counts. */ #if COUNT_BASIC_BLOCKS // -------------------------------------------------- // Basic block count frequency table: // -------------------------------------------------- // <= 1 ===> 26872 count ( 56% of total) // 2 .. 2 ===> 669 count ( 58% of total) // 3 .. 3 ===> 4687 count ( 68% of total) // 4 .. 5 ===> 5101 count ( 78% of total) // 6 .. 10 ===> 5575 count ( 90% of total) // 11 .. 20 ===> 3028 count ( 97% of total) // 21 .. 50 ===> 1108 count ( 99% of total) // 51 .. 100 ===> 182 count ( 99% of total) // 101 .. 1000 ===> 34 count (100% of total) // 1001 .. 10000 ===> 0 count (100% of total) // -------------------------------------------------- unsigned bbCntBuckets[] = {1, 2, 3, 5, 10, 20, 50, 100, 1000, 10000, 0}; Histogram bbCntTable(bbCntBuckets); /* Histogram for the IL opcode size of methods with a single basic block */ unsigned bbSizeBuckets[] = {1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 0}; Histogram bbOneBBSizeTable(bbSizeBuckets); #endif // COUNT_BASIC_BLOCKS /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
 *     exit conditions
 *   - number of loops that have an iterator (for like)
 *   - number of loops that have a constant iterator
 */

#if COUNT_LOOPS

unsigned totalLoopMethods;        // counts the total number of methods that have natural loops
unsigned maxLoopsPerMethod;       // counts the maximum number of loops a method has
unsigned totalLoopOverflows;      // # of methods that identified more loops than we can represent
unsigned totalLoopCount;          // counts the total number of natural loops
unsigned totalUnnatLoopCount;     // counts the total number of (not-necessarily natural) loops
unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent
unsigned iterLoopCount;           // counts the # of loops with an iterator (for like)
unsigned simpleTestLoopCount;     // counts the # of loops with an iterator and a simple loop condition (iter < const)
unsigned constIterLoopCount;      // counts the # of loops with a constant iterator (for like)
bool     hasMethodLoops;          // flag to keep track if we already counted a method as having loops
unsigned loopsThisMethod;         // counts the number of loops in the current method
bool     loopOverflowThisMethod;  // True if we exceeded the max # of loops in the method.

/* Histogram for number of loops in a method */

unsigned  loopCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0};
Histogram loopCountTable(loopCountBuckets);

/* Histogram for number of loop exits */

unsigned  loopExitCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 0};
Histogram loopExitCountTable(loopExitCountBuckets);

#endif // COUNT_LOOPS

//------------------------------------------------------------------------
// getJitGCType: Given the VM's CorInfoGCType convert it to the JIT's var_types
//
// Arguments:
//    gcType    - an enum value that originally came from an element
//                of the BYTE[] returned from getClassGClayout()
//
// Return Value:
//    The corresponding enum value from the JIT's var_types
//
// Notes:
//   The gcLayout of each field of a struct is returned from getClassGClayout()
//   as a BYTE[] but each BYTE element is actually a CorInfoGCType value
//   Note when we 'know' that there is only one element in this array
//   the JIT will often pass the address of a single BYTE, instead of a BYTE[]
//
var_types Compiler::getJitGCType(BYTE gcType)
{
    var_types     result      = TYP_UNKNOWN;
    CorInfoGCType corInfoType = (CorInfoGCType)gcType;

    if (corInfoType == TYPE_GC_NONE)
    {
        result = TYP_I_IMPL;
    }
    else if (corInfoType == TYPE_GC_REF)
    {
        result = TYP_REF;
    }
    else if (corInfoType == TYPE_GC_BYREF)
    {
        result = TYP_BYREF;
    }
    else
    {
        noway_assert(!"Bad value of 'gcType'");
    }
    return result;
}

#ifdef TARGET_X86
//---------------------------------------------------------------------------
// isTrivialPointerSizedStruct:
//    Check if the given struct type contains only one pointer-sized integer value type
//
// Arguments:
//    clsHnd - the handle for the struct type.
//
// Return Value:
//    true if the given struct type contains only one pointer-sized integer value type,
//    false otherwise.
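//
// Notes:
//    Illustration (hypothetical C#-style examples; this path is x86-only, so the pointer size is 4):
//      struct H { IntPtr p; }     -> true   (a single pointer-sized, non-GC integer field)
//      struct W { H h; }          -> true   (a chain of single-field value types ending in one)
//      struct P { short a, b; }   -> false  (right size, but more than one field)
//      struct R { object o; }     -> false  (the field is a GC reference, not an integer)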
// bool Compiler::isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const { assert(info.compCompHnd->isValueClass(clsHnd)); if (info.compCompHnd->getClassSize(clsHnd) != TARGET_POINTER_SIZE) { return false; } for (;;) { // all of class chain must be of value type and must have only one field if (!info.compCompHnd->isValueClass(clsHnd) || info.compCompHnd->getClassNumInstanceFields(clsHnd) != 1) { return false; } CORINFO_CLASS_HANDLE* pClsHnd = &clsHnd; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); CorInfoType fieldType = info.compCompHnd->getFieldType(fldHnd, pClsHnd); var_types vt = JITtype2varType(fieldType); if (fieldType == CORINFO_TYPE_VALUECLASS) { clsHnd = *pClsHnd; } else if (varTypeIsI(vt) && !varTypeIsGC(vt)) { return true; } else { return false; } } } #endif // TARGET_X86 //--------------------------------------------------------------------------- // isNativePrimitiveStructType: // Check if the given struct type is an intrinsic type that should be treated as though // it is not a struct at the unmanaged ABI boundary. // // Arguments: // clsHnd - the handle for the struct type. // // Return Value: // true if the given struct type should be treated as a primitive for unmanaged calls, // false otherwise. // bool Compiler::isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd) { if (!isIntrinsicType(clsHnd)) { return false; } const char* namespaceName = nullptr; const char* typeName = getClassNameFromMetadata(clsHnd, &namespaceName); if (strcmp(namespaceName, "System.Runtime.InteropServices") != 0) { return false; } return strcmp(typeName, "CLong") == 0 || strcmp(typeName, "CULong") == 0 || strcmp(typeName, "NFloat") == 0; } //----------------------------------------------------------------------------- // getPrimitiveTypeForStruct: // Get the "primitive" type that is is used for a struct // of size 'structSize'. // We examine 'clsHnd' to check the GC layout of the struct and // return TYP_REF for structs that simply wrap an object. // If the struct is a one element HFA/HVA, we will return the // proper floating point or vector type. // // Arguments: // structSize - the size of the struct type, cannot be zero // clsHnd - the handle for the struct type, used when may have // an HFA or if we need the GC layout for an object ref. // // Return Value: // The primitive type (i.e. byte, short, int, long, ref, float, double) // used to pass or return structs of this size. // If we shouldn't use a "primitive" type then TYP_UNKNOWN is returned. // Notes: // For 32-bit targets (X86/ARM32) the 64-bit TYP_LONG type is not // considered a primitive type by this method. // So a struct that wraps a 'long' is passed and returned in the // same way as any other 8-byte struct // For ARM32 if we have an HFA struct that wraps a 64-bit double // we will return TYP_DOUBLE. // For vector calling conventions, a vector is considered a "primitive" // type, as it is passed in a single register. // var_types Compiler::getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg) { assert(structSize != 0); var_types useType = TYP_UNKNOWN; // Start by determining if we have an HFA/HVA with a single element. if (GlobalJitOptions::compFeatureHfa) { // Arm64 Windows VarArg methods arguments will not classify HFA types, they will need to be treated // as if they are not HFA types. 
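        // Illustration (hypothetical examples of the single-element HFA/HVA case handled here):
        //   struct { double d; }        -> TYP_DOUBLE (struct size 8 matches the HFA element size)
        //   struct { float f; }         -> TYP_FLOAT  (struct size 4 matches the HFA element size)
        //   struct { float f; int i; }  -> not an HFA at all; handled by the size-based switch below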
if (!(TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg)) { switch (structSize) { case 4: case 8: #ifdef TARGET_ARM64 case 16: #endif // TARGET_ARM64 { var_types hfaType = GetHfaType(clsHnd); // We're only interested in the case where the struct size is equal to the size of the hfaType. if (varTypeIsValidHfaType(hfaType)) { if (genTypeSize(hfaType) == structSize) { useType = hfaType; } else { return TYP_UNKNOWN; } } } } if (useType != TYP_UNKNOWN) { return useType; } } } // Now deal with non-HFA/HVA structs. switch (structSize) { case 1: useType = TYP_BYTE; break; case 2: useType = TYP_SHORT; break; #if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 3: useType = TYP_INT; break; #endif // !TARGET_XARCH || UNIX_AMD64_ABI #ifdef TARGET_64BIT case 4: // We dealt with the one-float HFA above. All other 4-byte structs are handled as INT. useType = TYP_INT; break; #if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 5: case 6: case 7: useType = TYP_I_IMPL; break; #endif // !TARGET_XARCH || UNIX_AMD64_ABI #endif // TARGET_64BIT case TARGET_POINTER_SIZE: { BYTE gcPtr = 0; // Check if this pointer-sized struct is wrapping a GC object info.compCompHnd->getClassGClayout(clsHnd, &gcPtr); useType = getJitGCType(gcPtr); } break; default: useType = TYP_UNKNOWN; break; } return useType; } //----------------------------------------------------------------------------- // getArgTypeForStruct: // Get the type that is used to pass values of the given struct type. // If you have already retrieved the struct size then it should be // passed as the optional fourth argument, as this allows us to avoid // an extra call to getClassSize(clsHnd) // // Arguments: // clsHnd - the handle for the struct type // wbPassStruct - An "out" argument with information about how // the struct is to be passed // isVarArg - is vararg, used to ignore HFA types for Arm64 windows varargs // structSize - the size of the struct type, // or zero if we should call getClassSize(clsHnd) // // Return Value: // For wbPassStruct you can pass a 'nullptr' and nothing will be written // or returned for that out parameter. // When *wbPassStruct is SPK_PrimitiveType this method's return value // is the primitive type used to pass the struct. // When *wbPassStruct is SPK_ByReference this method's return value // is always TYP_UNKNOWN and the struct type is passed by reference to a copy // When *wbPassStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value // is always TYP_STRUCT and the struct type is passed by value either // using multiple registers or on the stack. // // Assumptions: // The size must be the size of the given type. // The given class handle must be for a value type (struct). // // Notes: // About HFA types: // When the clsHnd is a one element HFA type we return the appropriate // floating point primitive type and *wbPassStruct is SPK_PrimitiveType // If there are two or more elements in the HFA type then the this method's // return value is TYP_STRUCT and *wbPassStruct is SPK_ByValueAsHfa // var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, structPassingKind* wbPassStruct, bool isVarArg, unsigned structSize) { var_types useType = TYP_UNKNOWN; structPassingKind howToPassStruct = SPK_Unknown; // We must change this before we return assert(structSize != 0); // Determine if we can pass the struct as a primitive type. // Note that on x86 we only pass specific pointer-sized structs that satisfy isTrivialPointerSizedStruct checks. 
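    // For reference, an approximate sketch of what getPrimitiveTypeForStruct computes for the
    // common (non-HFA, non-GC) cases; see that method above for the authoritative rules:
    //   1 byte -> TYP_BYTE, 2 bytes -> TYP_SHORT, 4 bytes -> TYP_INT (on 64-bit targets),
    //   5-7 bytes -> TYP_I_IMPL (where allowed), pointer-size -> TYP_I_IMPL/TYP_REF/TYP_BYREF
    //   depending on the GC layout, and anything larger -> TYP_UNKNOWN (not a "primitive").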
#ifndef TARGET_X86 #ifdef UNIX_AMD64_ABI // An 8-byte struct may need to be passed in a floating point register // So we always consult the struct "Classifier" routine // SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc); if (structDesc.passedInRegisters && (structDesc.eightByteCount != 1)) { // We can't pass this as a primitive type. } else if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE) { // If this is passed as a floating type, use that. // Otherwise, we'll use the general case - we don't want to use the "EightByteType" // directly, because it returns `TYP_INT` for any integral type <= 4 bytes, and // we need to preserve small types. useType = GetEightByteType(structDesc, 0); } else #endif // UNIX_AMD64_ABI // The largest arg passed in a single register is MAX_PASS_SINGLEREG_BYTES, // so we can skip calling getPrimitiveTypeForStruct when we // have a struct that is larger than that. // if (structSize <= MAX_PASS_SINGLEREG_BYTES) { // We set the "primitive" useType based upon the structSize // and also examine the clsHnd to see if it is an HFA of count one useType = getPrimitiveTypeForStruct(structSize, clsHnd, isVarArg); } #else if (isTrivialPointerSizedStruct(clsHnd)) { useType = TYP_I_IMPL; } #endif // !TARGET_X86 // Did we change this struct type into a simple "primitive" type? // if (useType != TYP_UNKNOWN) { // Yes, we should use the "primitive" type in 'useType' howToPassStruct = SPK_PrimitiveType; } else // We can't replace the struct with a "primitive" type { // See if we can pass this struct by value, possibly in multiple registers // or if we should pass it by reference to a copy // if (structSize <= MAX_PASS_MULTIREG_BYTES) { // Structs that are HFA/HVA's are passed by value in multiple registers. // Arm64 Windows VarArg methods arguments will not classify HFA/HVA types, they will need to be treated // as if they are not HFA/HVA types. 
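        // Illustration (hypothetical example of the multi-element HFA path below):
        //   struct Vec3 { float x, y, z; } is a 12-byte HFA with three elements, so it is passed
        //   as SPK_ByValueAsHfa / TYP_STRUCT, i.e. by value in multiple floating-point registers
        //   (falling back to the stack once the parameter registers are exhausted).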
var_types hfaType; if (TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg) { hfaType = TYP_UNDEF; } else { hfaType = GetHfaType(clsHnd); } if (varTypeIsValidHfaType(hfaType)) { // HFA's of count one should have been handled by getPrimitiveTypeForStruct assert(GetHfaCount(clsHnd) >= 2); // setup wbPassType and useType indicate that this is passed by value as an HFA // using multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValueAsHfa; useType = TYP_STRUCT; } else // Not an HFA struct type { #ifdef UNIX_AMD64_ABI // The case of (structDesc.eightByteCount == 1) should have already been handled if ((structDesc.eightByteCount > 1) || !structDesc.passedInRegisters) { // setup wbPassType and useType indicate that this is passed by value in multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; } else { assert(structDesc.eightByteCount == 0); // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register // (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_ARM64) // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct assert(structSize > TARGET_POINTER_SIZE); // On ARM64 structs that are 9-16 bytes are passed by value in multiple registers // if (structSize <= (TARGET_POINTER_SIZE * 2)) { // setup wbPassType and useType indicate that this is passed by value in multiple registers // (when all of the parameters registers are used, then the stack will be used) howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; } else // a structSize that is 17-32 bytes in size { // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register // (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_X86) || defined(TARGET_ARM) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getArgTypeForStruct (with FEATURE_MULTIREG_ARGS=1)"); #endif // TARGET_XXX } } else // (structSize > MAX_PASS_MULTIREG_BYTES) { // We have a (large) struct that can't be replaced with a "primitive" type // and can't be passed in multiple registers CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; #elif defined(TARGET_AMD64) || defined(TARGET_ARM64) // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register (by reference to a copy) howToPassStruct = SPK_ByReference; useType = TYP_UNKNOWN; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getArgTypeForStruct"); #endif // TARGET_XXX } } // 'howToPassStruct' must be set to one of the valid values before we return assert(howToPassStruct != SPK_Unknown); if (wbPassStruct != nullptr) { *wbPassStruct = howToPassStruct; } return useType; } 
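// Illustration (hypothetical, non-varargs ARM64 examples of the above, for non-HFA structs):
//   an 8-byte struct  -> SPK_PrimitiveType (a single integer register, via getPrimitiveTypeForStruct)
//   a 16-byte struct  -> SPK_ByValue / TYP_STRUCT     (passed by value in two registers)
//   a 24-byte struct  -> SPK_ByReference / TYP_UNKNOWN (passed by reference to a copy)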
//-----------------------------------------------------------------------------
// getReturnTypeForStruct:
//     Get the type that is used to return values of the given struct type.
//     If you have already retrieved the struct size then it should be
//     passed as the optional third argument, as this allows us to avoid
//     an extra call to getClassSize(clsHnd)
//
// Arguments:
//    clsHnd         - the handle for the struct type
//    callConv       - the calling convention of the function
//                     that returns this struct.
//    wbReturnStruct - An "out" argument with information about how
//                     the struct is to be returned
//    structSize     - the size of the struct type,
//                     or zero if we should call getClassSize(clsHnd)
//
// Return Value:
//    For wbReturnStruct you can pass a 'nullptr' and nothing will be written
//    or returned for that out parameter.
//    When *wbReturnStruct is SPK_PrimitiveType this method's return value
//    is the primitive type used to return the struct.
//    When *wbReturnStruct is SPK_ByReference this method's return value
//    is always TYP_UNKNOWN and the struct type is returned using a return buffer
//    When *wbReturnStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value
//    is always TYP_STRUCT and the struct type is returned using multiple registers.
//
// Assumptions:
//    The size must be the size of the given type.
//    The given class handle must be for a value type (struct).
//
// Notes:
//    About HFA types:
//        When the clsHnd is a one element HFA type then this method's return
//        value is the appropriate floating point primitive type and
//        *wbReturnStruct is SPK_PrimitiveType.
//        If there are two or more elements in the HFA type and the target supports
//        multireg return types then the return value is TYP_STRUCT and
//        *wbReturnStruct is SPK_ByValueAsHfa.
//        Additionally if there are two or more elements in the HFA type and
//        the target doesn't support multireg return types then it is treated
//        as if it wasn't an HFA type.
//    About returning TYP_STRUCT:
//        Whenever this method's return value is TYP_STRUCT it always means
//        that multiple registers are used to return this struct.
//
var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE     clsHnd,
                                           CorInfoCallConvExtension callConv,
                                           structPassingKind*       wbReturnStruct /* = nullptr */,
                                           unsigned                 structSize /* = 0 */)
{
    var_types         useType             = TYP_UNKNOWN;
    structPassingKind howToReturnStruct   = SPK_Unknown; // We must change this before we return
    bool              canReturnInRegister = true;

    assert(clsHnd != NO_CLASS_HANDLE);

    if (structSize == 0)
    {
        structSize = info.compCompHnd->getClassSize(clsHnd);
    }
    assert(structSize > 0);

#ifdef UNIX_AMD64_ABI
    // An 8-byte struct may need to be returned in a floating point register
    // So we always consult the struct "Classifier" routine
    //
    SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
    eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc);

    if (structDesc.eightByteCount == 1)
    {
        assert(structSize <= sizeof(double));
        assert(structDesc.passedInRegisters);

        if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE)
        {
            // If this is returned as a floating type, use that.
            // Otherwise, leave as TYP_UNKNOWN and we'll sort things out below.
            useType           = GetEightByteType(structDesc, 0);
            howToReturnStruct = SPK_PrimitiveType;
        }
    }
    else
    {
        // Return classification is not always size based...
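        // Illustration (hypothetical SysV examples for this multi-eightbyte path):
        //   struct { double x; double y; }  -> two SSE eightbytes, returned in two XMM registers
        //   struct { long a; double d; }    -> INTEGER + SSE, returned in an integer and an XMM register
        //   structs classified as MEMORY    -> passedInRegisters is false; returned via a return buffer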
canReturnInRegister = structDesc.passedInRegisters; if (!canReturnInRegister) { assert(structDesc.eightByteCount == 0); howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } } #elif UNIX_X86_ABI if (callConv != CorInfoCallConvExtension::Managed && !isNativePrimitiveStructType(clsHnd)) { canReturnInRegister = false; howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #endif if (TargetOS::IsWindows && !TargetArchitecture::IsArm32 && callConvIsInstanceMethodCallConv(callConv) && !isNativePrimitiveStructType(clsHnd)) { canReturnInRegister = false; howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } // Check for cases where a small struct is returned in a register // via a primitive type. // // The largest "primitive type" is MAX_PASS_SINGLEREG_BYTES // so we can skip calling getPrimitiveTypeForStruct when we // have a struct that is larger than that. if (canReturnInRegister && (useType == TYP_UNKNOWN) && (structSize <= MAX_PASS_SINGLEREG_BYTES)) { // We set the "primitive" useType based upon the structSize // and also examine the clsHnd to see if it is an HFA of count one // // The ABI for struct returns in varArg methods, is same as the normal case, // so pass false for isVararg useType = getPrimitiveTypeForStruct(structSize, clsHnd, /*isVararg=*/false); if (useType != TYP_UNKNOWN) { if (structSize == genTypeSize(useType)) { // Currently: 1, 2, 4, or 8 byte structs howToReturnStruct = SPK_PrimitiveType; } else { // Currently: 3, 5, 6, or 7 byte structs assert(structSize < genTypeSize(useType)); howToReturnStruct = SPK_EnclosingType; } } } #ifdef TARGET_64BIT // Note this handles an odd case when FEATURE_MULTIREG_RET is disabled and HFAs are enabled // // getPrimitiveTypeForStruct will return TYP_UNKNOWN for a struct that is an HFA of two floats // because when HFA are enabled, normally we would use two FP registers to pass or return it // // But if we don't have support for multiple register return types, we have to change this. // Since what we have is an 8-byte struct (float + float) we change useType to TYP_I_IMPL // so that the struct is returned instead using an 8-byte integer register. // if ((FEATURE_MULTIREG_RET == 0) && (useType == TYP_UNKNOWN) && (structSize == (2 * sizeof(float))) && IsHfa(clsHnd)) { useType = TYP_I_IMPL; howToReturnStruct = SPK_PrimitiveType; } #endif // Did we change this struct type into a simple "primitive" type? if (useType != TYP_UNKNOWN) { // If so, we should have already set howToReturnStruct, too. 
assert(howToReturnStruct != SPK_Unknown); } else if (canReturnInRegister) // We can't replace the struct with a "primitive" type { // See if we can return this struct by value, possibly in multiple registers // or if we should return it using a return buffer register // if ((FEATURE_MULTIREG_RET == 1) && (structSize <= MAX_RET_MULTIREG_BYTES)) { // Structs that are HFA's are returned in multiple registers if (IsHfa(clsHnd)) { // HFA's of count one should have been handled by getPrimitiveTypeForStruct assert(GetHfaCount(clsHnd) >= 2); // setup wbPassType and useType indicate that this is returned by value as an HFA // using multiple registers howToReturnStruct = SPK_ByValueAsHfa; useType = TYP_STRUCT; } else // Not an HFA struct type { #ifdef UNIX_AMD64_ABI // The cases of (structDesc.eightByteCount == 1) and (structDesc.eightByteCount == 0) // should have already been handled assert(structDesc.eightByteCount > 1); // setup wbPassType and useType indicate that this is returned by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; assert(structDesc.passedInRegisters == true); #elif defined(TARGET_ARM64) // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct assert(structSize > TARGET_POINTER_SIZE); // On ARM64 structs that are 9-16 bytes are returned by value in multiple registers // if (structSize <= (TARGET_POINTER_SIZE * 2)) { // setup wbPassType and useType indicate that this is return by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; } else // a structSize that is 17-32 bytes in size { // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_X86) // Only 8-byte structs are return in multiple registers. // We also only support multireg struct returns on x86 to match the native calling convention. // So return 8-byte structs only when the calling convention is a native calling convention. 
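            // Illustration (hypothetical example): a struct { int lo; int hi; } returned from an
            // unmanaged cdecl/stdcall call comes back in the EAX:EDX register pair, which is why only
            // the native (non-managed) calling conventions take the multireg path below.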
if (structSize == MAX_RET_MULTIREG_BYTES && callConv != CorInfoCallConvExtension::Managed) { // setup wbPassType and useType indicate that this is return by value in multiple registers howToReturnStruct = SPK_ByValue; useType = TYP_STRUCT; } else { // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } #elif defined(TARGET_ARM) // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; #else // TARGET_XXX noway_assert(!"Unhandled TARGET in getReturnTypeForStruct (with FEATURE_MULTIREG_ARGS=1)"); #endif // TARGET_XXX } } else // (structSize > MAX_RET_MULTIREG_BYTES) || (FEATURE_MULTIREG_RET == 0) { // We have a (large) struct that can't be replaced with a "primitive" type // and can't be returned in multiple registers // We return this struct using a return buffer register // setup wbPassType and useType indicate that this is returned using a return buffer register // (reference to a return buffer) howToReturnStruct = SPK_ByReference; useType = TYP_UNKNOWN; } } // 'howToReturnStruct' must be set to one of the valid values before we return assert(howToReturnStruct != SPK_Unknown); if (wbReturnStruct != nullptr) { *wbReturnStruct = howToReturnStruct; } return useType; } /////////////////////////////////////////////////////////////////////////////// // // MEASURE_NOWAY: code to measure and rank dynamic occurrences of noway_assert. // (Just the appearances of noway_assert, whether the assert is true or false.) // This might help characterize the cost of noway_assert in non-DEBUG builds, // or determine which noway_assert should be simple DEBUG-only asserts. // /////////////////////////////////////////////////////////////////////////////// #if MEASURE_NOWAY struct FileLine { char* m_file; unsigned m_line; char* m_condStr; FileLine() : m_file(nullptr), m_line(0), m_condStr(nullptr) { } FileLine(const char* file, unsigned line, const char* condStr) : m_line(line) { size_t newSize = (strlen(file) + 1) * sizeof(char); m_file = HostAllocator::getHostAllocator().allocate<char>(newSize); strcpy_s(m_file, newSize, file); newSize = (strlen(condStr) + 1) * sizeof(char); m_condStr = HostAllocator::getHostAllocator().allocate<char>(newSize); strcpy_s(m_condStr, newSize, condStr); } FileLine(const FileLine& other) { m_file = other.m_file; m_line = other.m_line; m_condStr = other.m_condStr; } // GetHashCode() and Equals() are needed by JitHashTable static unsigned GetHashCode(FileLine fl) { assert(fl.m_file != nullptr); unsigned code = fl.m_line; for (const char* p = fl.m_file; *p != '\0'; p++) { code += *p; } // Could also add condStr. 
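        // The hash is deliberately simple: the line number plus the sum of the characters in the
        // file name. Collisions are acceptable here; Equals() below does the precise comparison.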
return code; } static bool Equals(FileLine fl1, FileLine fl2) { return (fl1.m_line == fl2.m_line) && (0 == strcmp(fl1.m_file, fl2.m_file)); } }; typedef JitHashTable<FileLine, FileLine, size_t, HostAllocator> FileLineToCountMap; FileLineToCountMap* NowayAssertMap; void Compiler::RecordNowayAssert(const char* filename, unsigned line, const char* condStr) { if (NowayAssertMap == nullptr) { NowayAssertMap = new (HostAllocator::getHostAllocator()) FileLineToCountMap(HostAllocator::getHostAllocator()); } FileLine fl(filename, line, condStr); size_t* pCount = NowayAssertMap->LookupPointer(fl); if (pCount == nullptr) { NowayAssertMap->Set(fl, 1); } else { ++(*pCount); } } void RecordNowayAssertGlobal(const char* filename, unsigned line, const char* condStr) { if ((JitConfig.JitMeasureNowayAssert() == 1) && (JitTls::GetCompiler() != nullptr)) { JitTls::GetCompiler()->RecordNowayAssert(filename, line, condStr); } } struct NowayAssertCountMap { size_t count; FileLine fl; NowayAssertCountMap() : count(0) { } struct compare { bool operator()(const NowayAssertCountMap& elem1, const NowayAssertCountMap& elem2) { return (ssize_t)elem2.count < (ssize_t)elem1.count; // sort in descending order } }; }; void DisplayNowayAssertMap() { if (NowayAssertMap != nullptr) { FILE* fout; LPCWSTR strJitMeasureNowayAssertFile = JitConfig.JitMeasureNowayAssertFile(); if (strJitMeasureNowayAssertFile != nullptr) { fout = _wfopen(strJitMeasureNowayAssertFile, W("a")); if (fout == nullptr) { fprintf(jitstdout, "Failed to open JitMeasureNowayAssertFile \"%ws\"\n", strJitMeasureNowayAssertFile); return; } } else { fout = jitstdout; } // Iterate noway assert map, create sorted table by occurrence, dump it. unsigned count = NowayAssertMap->GetCount(); NowayAssertCountMap* nacp = new NowayAssertCountMap[count]; unsigned i = 0; for (FileLineToCountMap::KeyIterator iter = NowayAssertMap->Begin(), end = NowayAssertMap->End(); !iter.Equal(end); ++iter) { nacp[i].count = iter.GetValue(); nacp[i].fl = iter.Get(); ++i; } jitstd::sort(nacp, nacp + count, NowayAssertCountMap::compare()); if (fout == jitstdout) { // Don't output the header if writing to a file, since we'll be appending to existing dumps in that case. fprintf(fout, "\nnoway_assert counts:\n"); fprintf(fout, "count, file, line, text\n"); } for (i = 0; i < count; i++) { fprintf(fout, "%u, %s, %u, \"%s\"\n", nacp[i].count, nacp[i].fl.m_file, nacp[i].fl.m_line, nacp[i].fl.m_condStr); } if (fout != jitstdout) { fclose(fout); fout = nullptr; } } } #endif // MEASURE_NOWAY /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE size_t genFlowNodeSize; size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE /*****************************************************************************/ // We keep track of methods we've already compiled. 
/***************************************************************************** * Declare the statics */ #ifdef DEBUG /* static */ LONG Compiler::s_compMethodsCount = 0; // to produce unique label names #endif #if MEASURE_MEM_ALLOC /* static */ bool Compiler::s_dspMemStats = false; #endif #ifndef PROFILING_SUPPORTED const bool Compiler::Options::compNoPInvokeInlineCB = false; #endif /***************************************************************************** * * One time initialization code */ /* static */ void Compiler::compStartup() { #if DISPLAY_SIZES grossVMsize = grossNCsize = totalNCsize = 0; #endif // DISPLAY_SIZES /* Initialize the table of tree node sizes */ GenTree::InitNodeSize(); #ifdef JIT32_GCENCODER // Initialize the GC encoder lookup table GCInfo::gcInitEncoderLookupTable(); #endif /* Initialize the emitter */ emitter::emitInit(); // Static vars of ValueNumStore ValueNumStore::InitValueNumStoreStatics(); compDisplayStaticSizes(jitstdout); } /***************************************************************************** * * One time finalization code */ /* static */ void Compiler::compShutdown() { if (s_pAltJitExcludeAssembliesList != nullptr) { s_pAltJitExcludeAssembliesList->~AssemblyNamesList2(); // call the destructor s_pAltJitExcludeAssembliesList = nullptr; } #ifdef DEBUG if (s_pJitDisasmIncludeAssembliesList != nullptr) { s_pJitDisasmIncludeAssembliesList->~AssemblyNamesList2(); // call the destructor s_pJitDisasmIncludeAssembliesList = nullptr; } #endif // DEBUG #if MEASURE_NOWAY DisplayNowayAssertMap(); #endif // MEASURE_NOWAY /* Shut down the emitter */ emitter::emitDone(); #if defined(DEBUG) || defined(INLINE_DATA) // Finish reading and/or writing inline xml if (JitConfig.JitInlineDumpXmlFile() != nullptr) { FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a")); if (file != nullptr) { InlineStrategy::FinalizeXml(file); fclose(file); } else { InlineStrategy::FinalizeXml(); } } #endif // defined(DEBUG) || defined(INLINE_DATA) #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS if (genMethodCnt == 0) { return; } #endif #if NODEBASH_STATS GenTree::ReportOperBashing(jitstdout); #endif // Where should we write our statistics output? FILE* fout = jitstdout; #ifdef FEATURE_JIT_METHOD_PERF if (compJitTimeLogFilename != nullptr) { FILE* jitTimeLogFile = _wfopen(compJitTimeLogFilename, W("a")); if (jitTimeLogFile != nullptr) { CompTimeSummaryInfo::s_compTimeSummary.Print(jitTimeLogFile); fclose(jitTimeLogFile); } } JitTimer::Shutdown(); #endif // FEATURE_JIT_METHOD_PERF #if COUNT_AST_OPERS // Add up all the counts so that we can show percentages of total unsigned totalCount = 0; for (unsigned op = 0; op < GT_COUNT; op++) { totalCount += GenTree::s_gtNodeCounts[op]; } if (totalCount > 0) { struct OperInfo { unsigned Count; unsigned Size; genTreeOps Oper; }; OperInfo opers[GT_COUNT]; for (unsigned op = 0; op < GT_COUNT; op++) { opers[op] = {GenTree::s_gtNodeCounts[op], GenTree::s_gtTrueSizes[op], static_cast<genTreeOps>(op)}; } jitstd::sort(opers, opers + ArrLen(opers), [](const OperInfo& l, const OperInfo& r) { // We'll be sorting in descending order. 
return l.Count >= r.Count; }); unsigned remainingCount = totalCount; unsigned remainingCountLarge = 0; unsigned remainingCountSmall = 0; unsigned countLarge = 0; unsigned countSmall = 0; fprintf(fout, "\nGenTree operator counts (approximate):\n\n"); for (OperInfo oper : opers) { unsigned size = oper.Size; unsigned count = oper.Count; double percentage = 100.0 * count / totalCount; if (size > TREE_NODE_SZ_SMALL) { countLarge += count; } else { countSmall += count; } // Let's not show anything below a threshold if (percentage >= 0.5) { fprintf(fout, " GT_%-17s %7u (%4.1lf%%) %3u bytes each\n", GenTree::OpName(oper.Oper), count, percentage, size); remainingCount -= count; } else { if (size > TREE_NODE_SZ_SMALL) { remainingCountLarge += count; } else { remainingCountSmall += count; } } } if (remainingCount > 0) { fprintf(fout, " All other GT_xxx ... %7u (%4.1lf%%) ... %4.1lf%% small + %4.1lf%% large\n", remainingCount, 100.0 * remainingCount / totalCount, 100.0 * remainingCountSmall / totalCount, 100.0 * remainingCountLarge / totalCount); } fprintf(fout, " -----------------------------------------------------\n"); fprintf(fout, " Total ....... %11u --ALL-- ... %4.1lf%% small + %4.1lf%% large\n", totalCount, 100.0 * countSmall / totalCount, 100.0 * countLarge / totalCount); fprintf(fout, "\n"); } #endif // COUNT_AST_OPERS #if DISPLAY_SIZES if (grossVMsize && grossNCsize) { fprintf(fout, "\n"); fprintf(fout, "--------------------------------------\n"); fprintf(fout, "Function and GC info size stats\n"); fprintf(fout, "--------------------------------------\n"); fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, grossNCsize, Target::g_tgtCPUName, 100 * grossNCsize / grossVMsize, "Total (excluding GC info)"); fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, totalNCsize, Target::g_tgtCPUName, 100 * totalNCsize / grossVMsize, "Total (including GC info)"); if (gcHeaderISize || gcHeaderNSize) { fprintf(fout, "\n"); fprintf(fout, "GC tables : [%7uI,%7uN] %7u byt (%u%% of IL, %u%% of %s).\n", gcHeaderISize + gcPtrMapISize, gcHeaderNSize + gcPtrMapNSize, totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName); fprintf(fout, "GC headers : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcHeaderISize, gcHeaderNSize, gcHeaderISize + gcHeaderNSize, (float)gcHeaderISize / (genMethodICnt + 0.001), (float)gcHeaderNSize / (genMethodNCnt + 0.001), (float)(gcHeaderISize + gcHeaderNSize) / genMethodCnt); fprintf(fout, "GC ptr maps : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcPtrMapISize, gcPtrMapNSize, gcPtrMapISize + gcPtrMapNSize, (float)gcPtrMapISize / (genMethodICnt + 0.001), (float)gcPtrMapNSize / (genMethodNCnt + 0.001), (float)(gcPtrMapISize + gcPtrMapNSize) / genMethodCnt); } else { fprintf(fout, "\n"); fprintf(fout, "GC tables take up %u bytes (%u%% of instr, %u%% of %6s code).\n", totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName); } #ifdef DEBUG #if DOUBLE_ALIGN fprintf(fout, "%u out of %u methods generated with double-aligned stack\n", Compiler::s_lvaDoubleAlignedProcsCount, genMethodCnt); #endif #endif } #endif // DISPLAY_SIZES #if CALL_ARG_STATS compDispCallArgStats(fout); #endif #if COUNT_BASIC_BLOCKS fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Basic block count frequency table:\n"); fprintf(fout, 
"--------------------------------------------------\n"); bbCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "IL method size frequency table for methods with a single basic block:\n"); fprintf(fout, "--------------------------------------------------\n"); bbOneBBSizeTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); #endif // COUNT_BASIC_BLOCKS #if COUNT_LOOPS fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Loop stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Total number of methods with loops is %5u\n", totalLoopMethods); fprintf(fout, "Total number of loops is %5u\n", totalLoopCount); fprintf(fout, "Maximum number of loops per method is %5u\n", maxLoopsPerMethod); fprintf(fout, "# of methods overflowing nat loop table is %5u\n", totalLoopOverflows); fprintf(fout, "Total number of 'unnatural' loops is %5u\n", totalUnnatLoopCount); fprintf(fout, "# of methods overflowing unnat loop limit is %5u\n", totalUnnatLoopOverflows); fprintf(fout, "Total number of loops with an iterator is %5u\n", iterLoopCount); fprintf(fout, "Total number of loops with a simple iterator is %5u\n", simpleTestLoopCount); fprintf(fout, "Total number of loops with a constant iterator is %5u\n", constIterLoopCount); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Loop count frequency table:\n"); fprintf(fout, "--------------------------------------------------\n"); loopCountTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Loop exit count frequency table:\n"); fprintf(fout, "--------------------------------------------------\n"); loopExitCountTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); #endif // COUNT_LOOPS #if DATAFLOW_ITER fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Total number of iterations in the CSE dataflow loop is %5u\n", CSEiterCount); fprintf(fout, "Total number of iterations in the CF dataflow loop is %5u\n", CFiterCount); #endif // DATAFLOW_ITER #if MEASURE_NODE_SIZE fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "GenTree node allocation stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Allocated %6I64u tree nodes (%7I64u bytes total, avg %4I64u bytes per method)\n", genNodeSizeStats.genTreeNodeCnt, genNodeSizeStats.genTreeNodeSize, genNodeSizeStats.genTreeNodeSize / genMethodCnt); fprintf(fout, "Allocated %7I64u bytes of unused tree node space (%3.2f%%)\n", genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize, (float)(100 * (genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize)) / genNodeSizeStats.genTreeNodeSize); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of per-method GenTree node counts:\n"); genTreeNcntHist.dump(fout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of per-method GenTree node allocations (in bytes):\n"); genTreeNsizHist.dump(fout); #endif // MEASURE_NODE_SIZE #if MEASURE_BLOCK_SIZE fprintf(fout, "\n"); fprintf(fout, 
"---------------------------------------------------\n"); fprintf(fout, "BasicBlock and flowList/BasicBlockList allocation stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Allocated %6u basic blocks (%7u bytes total, avg %4u bytes per method)\n", BasicBlock::s_Count, BasicBlock::s_Size, BasicBlock::s_Size / genMethodCnt); fprintf(fout, "Allocated %6u flow nodes (%7u bytes total, avg %4u bytes per method)\n", genFlowNodeCnt, genFlowNodeSize, genFlowNodeSize / genMethodCnt); #endif // MEASURE_BLOCK_SIZE #if MEASURE_MEM_ALLOC if (s_dspMemStats) { fprintf(fout, "\nAll allocations:\n"); ArenaAllocator::dumpAggregateMemStats(jitstdout); fprintf(fout, "\nLargest method:\n"); ArenaAllocator::dumpMaxMemStats(jitstdout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of total memory allocated per method (in KB):\n"); memAllocHist.dump(fout); fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Distribution of total memory used per method (in KB):\n"); memUsedHist.dump(fout); } #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS #ifdef DEBUG // Always display loop stats in retail if (JitConfig.DisplayLoopHoistStats() != 0) #endif // DEBUG { PrintAggregateLoopHoistStats(jitstdout); } #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS if (JitConfig.JitEnregStats() != 0) { s_enregisterStats.Dump(fout); } #endif // TRACK_ENREG_STATS #if MEASURE_PTRTAB_SIZE fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "GC pointer table stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Reg pointer descriptor size (internal): %8u (avg %4u per method)\n", GCInfo::s_gcRegPtrDscSize, GCInfo::s_gcRegPtrDscSize / genMethodCnt); fprintf(fout, "Total pointer table size: %8u (avg %4u per method)\n", GCInfo::s_gcTotalPtrTabSize, GCInfo::s_gcTotalPtrTabSize / genMethodCnt); #endif // MEASURE_PTRTAB_SIZE #if MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES if (genMethodCnt != 0) { fprintf(fout, "\n"); fprintf(fout, "A total of %6u methods compiled", genMethodCnt); #if DISPLAY_SIZES if (genMethodICnt || genMethodNCnt) { fprintf(fout, " (%u interruptible, %u non-interruptible)", genMethodICnt, genMethodNCnt); } #endif // DISPLAY_SIZES fprintf(fout, ".\n"); } #endif // MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES #if EMITTER_STATS emitterStats(fout); #endif #if MEASURE_FATAL fprintf(fout, "\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, "Fatal errors stats\n"); fprintf(fout, "---------------------------------------------------\n"); fprintf(fout, " badCode: %u\n", fatal_badCode); fprintf(fout, " noWay: %u\n", fatal_noWay); fprintf(fout, " implLimitation: %u\n", fatal_implLimitation); fprintf(fout, " NOMEM: %u\n", fatal_NOMEM); fprintf(fout, " noWayAssertBody: %u\n", fatal_noWayAssertBody); #ifdef DEBUG fprintf(fout, " noWayAssertBodyArgs: %u\n", fatal_noWayAssertBodyArgs); #endif // DEBUG fprintf(fout, " NYI: %u\n", fatal_NYI); #endif // MEASURE_FATAL } /***************************************************************************** * Display static data structure sizes. 
*/ /* static */ void Compiler::compDisplayStaticSizes(FILE* fout) { #if MEASURE_NODE_SIZE GenTree::DumpNodeSizes(fout); #endif #if EMITTER_STATS emitterStaticStats(fout); #endif } /***************************************************************************** * * Constructor */ void Compiler::compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo) { assert(pAlloc); compArenaAllocator = pAlloc; // Inlinee Compile object will only be allocated when needed for the 1st time. InlineeCompiler = nullptr; // Set the inline info. impInlineInfo = inlineInfo; info.compCompHnd = compHnd; info.compMethodHnd = methodHnd; info.compMethodInfo = methodInfo; #ifdef DEBUG bRangeAllowStress = false; #endif #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS // Initialize the method name and related info, as it is used early in determining whether to // apply stress modes, and which ones to apply. // Note that even allocating memory can invoke the stress mechanism, so ensure that both // 'compMethodName' and 'compFullName' are either null or valid before we allocate. // (The stress mode checks references these prior to checking bRangeAllowStress.) // info.compMethodName = nullptr; info.compClassName = nullptr; info.compFullName = nullptr; const char* classNamePtr; const char* methodName; methodName = eeGetMethodName(methodHnd, &classNamePtr); unsigned len = (unsigned)roundUp(strlen(classNamePtr) + 1); info.compClassName = getAllocator(CMK_DebugOnly).allocate<char>(len); info.compMethodName = methodName; strcpy_s((char*)info.compClassName, len, classNamePtr); info.compFullName = eeGetMethodFullName(methodHnd); info.compPerfScore = 0.0; info.compMethodSuperPMIIndex = g_jitHost->getIntConfigValue(W("SuperPMIMethodContextNumber"), -1); #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) info.compMethodHashPrivate = 0; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef DEBUG // Opt-in to jit stress based on method hash ranges. // // Note the default (with JitStressRange not set) is that all // methods will be subject to stress. static ConfigMethodRange fJitStressRange; fJitStressRange.EnsureInit(JitConfig.JitStressRange()); assert(!fJitStressRange.Error()); bRangeAllowStress = fJitStressRange.Contains(info.compMethodHash()); #endif // DEBUG eeInfoInitialized = false; compDoAggressiveInlining = false; if (compIsForInlining()) { m_inlineStrategy = nullptr; compInlineResult = inlineInfo->inlineResult; } else { m_inlineStrategy = new (this, CMK_Inlining) InlineStrategy(this); compInlineResult = nullptr; } // Initialize this to the first phase to run. mostRecentlyActivePhase = PHASE_PRE_IMPORT; // Initially, no phase checks are active. activePhaseChecks = PhaseChecks::CHECK_NONE; #ifdef FEATURE_TRACELOGGING // Make sure JIT telemetry is initialized as soon as allocations can be made // but no later than a point where noway_asserts can be thrown. // 1. JIT telemetry could allocate some objects internally. // 2. NowayAsserts are tracked through telemetry. // Note: JIT telemetry could gather data when compiler is not fully initialized. // So you have to initialize the compiler variables you use for telemetry. 
assert((unsigned)PHASE_PRE_IMPORT == 0); info.compILCodeSize = 0; info.compMethodHnd = nullptr; compJitTelemetry.Initialize(this); #endif fgInit(); lvaInit(); if (!compIsForInlining()) { codeGen = getCodeGenerator(this); optInit(); hashBv::Init(this); compVarScopeMap = nullptr; // If this method were a real constructor for Compiler, these would // become method initializations. impPendingBlockMembers = JitExpandArray<BYTE>(getAllocator()); impSpillCliquePredMembers = JitExpandArray<BYTE>(getAllocator()); impSpillCliqueSuccMembers = JitExpandArray<BYTE>(getAllocator()); new (&genIPmappings, jitstd::placement_t()) jitstd::list<IPmappingDsc>(getAllocator(CMK_DebugInfo)); #ifdef DEBUG new (&genPreciseIPmappings, jitstd::placement_t()) jitstd::list<PreciseIPMapping>(getAllocator(CMK_DebugOnly)); #endif lvMemoryPerSsaData = SsaDefArray<SsaMemDef>(); // // Initialize all the per-method statistics gathering data structures. // optLoopsCloned = 0; #if LOOP_HOIST_STATS m_loopsConsidered = 0; m_curLoopHasHoistedExpression = false; m_loopsWithHoistedExpressions = 0; m_totalHoistedExpressions = 0; #endif // LOOP_HOIST_STATS #if MEASURE_NODE_SIZE genNodeSizeStatsPerFunc.Init(); #endif // MEASURE_NODE_SIZE } else { codeGen = nullptr; } compJmpOpUsed = false; compLongUsed = false; compTailCallUsed = false; compTailPrefixSeen = false; compLocallocSeen = false; compLocallocUsed = false; compLocallocOptimized = false; compQmarkRationalized = false; compQmarkUsed = false; compFloatingPointUsed = false; compSuppressedZeroInit = false; compNeedsGSSecurityCookie = false; compGSReorderStackLayout = false; compGeneratingProlog = false; compGeneratingEpilog = false; compLSRADone = false; compRationalIRForm = false; #ifdef DEBUG compCodeGenDone = false; opts.compMinOptsIsUsed = false; #endif opts.compMinOptsIsSet = false; // Used by fgFindJumpTargets for inlining heuristics. opts.instrCount = 0; // Used to track when we should consider running EarlyProp optMethodFlags = 0; optNoReturnCallCount = 0; #ifdef DEBUG m_nodeTestData = nullptr; m_loopHoistCSEClass = FIRST_LOOP_HOIST_CSE_CLASS; #endif m_switchDescMap = nullptr; m_blockToEHPreds = nullptr; m_fieldSeqStore = nullptr; m_zeroOffsetFieldMap = nullptr; m_arrayInfoMap = nullptr; m_refAnyClass = nullptr; for (MemoryKind memoryKind : allMemoryKinds()) { m_memorySsaMap[memoryKind] = nullptr; } #ifdef DEBUG if (!compIsForInlining()) { compDoComponentUnitTestsOnce(); } #endif // DEBUG vnStore = nullptr; m_opAsgnVarDefSsaNums = nullptr; m_nodeToLoopMemoryBlockMap = nullptr; fgSsaPassesCompleted = 0; fgVNPassesCompleted = 0; // check that HelperCallProperties are initialized assert(s_helperCallProperties.IsPure(CORINFO_HELP_GETSHARED_GCSTATIC_BASE)); assert(!s_helperCallProperties.IsPure(CORINFO_HELP_GETFIELDOBJ)); // quick sanity check // We start with the flow graph in tree-order fgOrder = FGOrderTree; m_classLayoutTable = nullptr; #ifdef FEATURE_SIMD m_simdHandleCache = nullptr; #endif // FEATURE_SIMD compUsesThrowHelper = false; } /***************************************************************************** * * Destructor */ void Compiler::compDone() { } void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection) /* OUT */ { void* addr; if (info.compMatchedVM) { addr = info.compCompHnd->getHelperFtn(ftnNum, ppIndirection); } else { // If we don't have a matched VM, we won't get valid results when asking for a helper function. 
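        // Instead, hand back a recognizable placeholder (0xCA11CA11 reads as "call call"), presumably
        // so that cross-targeting altjit runs can still generate and disassemble code even though the
        // resulting code can never actually be executed.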
        addr = UlongToPtr(0xCA11CA11); // "callcall"
    }

    return addr;
}

unsigned Compiler::compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd)
{
    var_types sigType = genActualType(JITtype2varType(cit));
    unsigned  sigSize;
    sigSize = genTypeSize(sigType);
    if (cit == CORINFO_TYPE_VALUECLASS)
    {
        sigSize = info.compCompHnd->getClassSize(clsHnd);
    }
    else if (cit == CORINFO_TYPE_REFANY)
    {
        sigSize = 2 * TARGET_POINTER_SIZE;
    }

    return sigSize;
}

#ifdef DEBUG
static bool DidComponentUnitTests = false;

void Compiler::compDoComponentUnitTestsOnce()
{
    if (!JitConfig.RunComponentUnitTests())
    {
        return;
    }

    if (!DidComponentUnitTests)
    {
        DidComponentUnitTests = true;
        ValueNumStore::RunTests(this);
        BitSetSupport::TestSuite(getAllocatorDebugOnly());
    }
}

//------------------------------------------------------------------------
// compGetJitDefaultFill:
//
// Return Value:
//    An unsigned char value used to initialize memory allocated by the JIT.
//    The default value is taken from COMPLUS_JitDefaultFill; if it is not set
//    the value will be 0xdd. When JitStress is active a random value based
//    on the method hash is used.
//
// Notes:
//    Note that we can't use small values like zero, because we have some
//    asserts that can fire for such values.
//
// static
unsigned char Compiler::compGetJitDefaultFill(Compiler* comp)
{
    unsigned char defaultFill = (unsigned char)JitConfig.JitDefaultFill();

    if (comp != nullptr && comp->compStressCompile(STRESS_GENERIC_VARN, 50))
    {
        unsigned temp;
        temp = comp->info.compMethodHash();
        temp = (temp >> 16) ^ temp;
        temp = (temp >> 8) ^ temp;
        temp = temp & 0xff;

        // asserts like this: assert(!IsUninitialized(stkLvl));
        // mean that small values for defaultFill are problematic
        // so we make the value larger in that case.
        if (temp < 0x20)
        {
            temp |= 0x80;
        }

        // Make a misaligned pointer value to reduce probability of getting a valid value and firing
        // assert(!IsUninitialized(pointer)).
        temp |= 0x1;

        defaultFill = (unsigned char)temp;
    }

    return defaultFill;
}

#endif // DEBUG

/*****************************************************************************/
#ifdef DEBUG
/*****************************************************************************/

VarName Compiler::compVarName(regNumber reg, bool isFloatReg)
{
    if (isFloatReg)
    {
        assert(genIsValidFloatReg(reg));
    }
    else
    {
        assert(genIsValidReg(reg));
    }

    if ((info.compVarScopesCount > 0) && compCurBB && opts.varNames)
    {
        unsigned   lclNum;
        LclVarDsc* varDsc;

        /* Look for the matching register */
        for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
        {
            /* If the variable is not in a register, or not in the register we're looking for, quit. */
            /* Also, if it is a compiler generated variable (i.e. slot# > info.compVarScopesCount), don't bother.
*/ if ((varDsc->lvRegister != 0) && (varDsc->GetRegNum() == reg) && (varDsc->lvSlotNum < info.compVarScopesCount)) { /* check if variable in that register is live */ if (VarSetOps::IsMember(this, compCurLife, varDsc->lvVarIndex)) { /* variable is live - find the corresponding slot */ VarScopeDsc* varScope = compFindLocalVar(varDsc->lvSlotNum, compCurBB->bbCodeOffs, compCurBB->bbCodeOffsEnd); if (varScope) { return varScope->vsdName; } } } } } return nullptr; } const char* Compiler::compRegVarName(regNumber reg, bool displayVar, bool isFloatReg) { #ifdef TARGET_ARM isFloatReg = genIsValidFloatReg(reg); #endif if (displayVar && (reg != REG_NA)) { VarName varName = compVarName(reg, isFloatReg); if (varName) { const int NAME_VAR_REG_BUFFER_LEN = 4 + 256 + 1; static char nameVarReg[2][NAME_VAR_REG_BUFFER_LEN]; // to avoid overwriting the buffer when have 2 // consecutive calls before printing static int index = 0; // for circular index into the name array index = (index + 1) % 2; // circular reuse of index sprintf_s(nameVarReg[index], NAME_VAR_REG_BUFFER_LEN, "%s'%s'", getRegName(reg), VarNameToStr(varName)); return nameVarReg[index]; } } /* no debug info required or no variable in that register -> return standard name */ return getRegName(reg); } const char* Compiler::compRegNameForSize(regNumber reg, size_t size) { if (size == 0 || size >= 4) { return compRegVarName(reg, true); } // clang-format off static const char * sizeNames[][2] = { { "al", "ax" }, { "cl", "cx" }, { "dl", "dx" }, { "bl", "bx" }, #ifdef TARGET_AMD64 { "spl", "sp" }, // ESP { "bpl", "bp" }, // EBP { "sil", "si" }, // ESI { "dil", "di" }, // EDI { "r8b", "r8w" }, { "r9b", "r9w" }, { "r10b", "r10w" }, { "r11b", "r11w" }, { "r12b", "r12w" }, { "r13b", "r13w" }, { "r14b", "r14w" }, { "r15b", "r15w" }, #endif // TARGET_AMD64 }; // clang-format on assert(isByteReg(reg)); assert(genRegMask(reg) & RBM_BYTE_REGS); assert(size == 1 || size == 2); return sizeNames[reg][size - 1]; } const char* Compiler::compLocalVarName(unsigned varNum, unsigned offs) { unsigned i; VarScopeDsc* t; for (i = 0, t = info.compVarScopes; i < info.compVarScopesCount; i++, t++) { if (t->vsdVarNum != varNum) { continue; } if (offs >= t->vsdLifeBeg && offs < t->vsdLifeEnd) { return VarNameToStr(t->vsdName); } } return nullptr; } /*****************************************************************************/ #endif // DEBUG /*****************************************************************************/ void Compiler::compSetProcessor() { // // NOTE: This function needs to be kept in sync with EEJitManager::SetCpuInfo() in vm\codeman.cpp // const JitFlags& jitFlags = *opts.jitFlags; #if defined(TARGET_ARM) info.genCPU = CPU_ARM; #elif defined(TARGET_ARM64) info.genCPU = CPU_ARM64; #elif defined(TARGET_AMD64) info.genCPU = CPU_X64; #elif defined(TARGET_X86) if (jitFlags.IsSet(JitFlags::JIT_FLAG_TARGET_P4)) info.genCPU = CPU_X86_PENTIUM_4; else info.genCPU = CPU_X86; #endif // // Processor specific optimizations // CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 opts.compUseCMOV = true; #elif defined(TARGET_X86) opts.compUseCMOV = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_CMOV); #ifdef DEBUG if (opts.compUseCMOV) opts.compUseCMOV = !compStressCompile(STRESS_USE_CMOV, 50); #endif // DEBUG #endif // TARGET_X86 // The VM will set the ISA flags depending on actual hardware support // and any specified config switches specified by the user. The exception // here is for certain "artificial ISAs" such as Vector64/128/256 where they // don't actually exist. 
The JIT is in charge of adding those and ensuring // the total sum of flags is still valid. CORINFO_InstructionSetFlags instructionSetFlags = jitFlags.GetInstructionSetFlags(); opts.compSupportsISA = 0; opts.compSupportsISAReported = 0; opts.compSupportsISAExactly = 0; #if defined(TARGET_XARCH) instructionSetFlags.AddInstructionSet(InstructionSet_Vector128); instructionSetFlags.AddInstructionSet(InstructionSet_Vector256); #endif // TARGET_XARCH #if defined(TARGET_ARM64) instructionSetFlags.AddInstructionSet(InstructionSet_Vector64); instructionSetFlags.AddInstructionSet(InstructionSet_Vector128); #endif // TARGET_ARM64 instructionSetFlags = EnsureInstructionSetFlagsAreValid(instructionSetFlags); opts.setSupportedISAs(instructionSetFlags); #ifdef TARGET_XARCH if (!compIsForInlining()) { if (canUseVexEncoding()) { codeGen->GetEmitter()->SetUseVEXEncoding(true); // Assume each JITted method does not contain AVX instruction at first codeGen->GetEmitter()->SetContainsAVX(false); codeGen->GetEmitter()->SetContains256bitAVX(false); } } #endif // TARGET_XARCH } bool Compiler::notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const { const char* isaString = InstructionSetToString(isa); JITDUMP("Notify VM instruction set (%s) %s be supported.\n", isaString, supported ? "must" : "must not"); return info.compCompHnd->notifyInstructionSetUsage(isa, supported); } #ifdef PROFILING_SUPPORTED // A Dummy routine to receive Enter/Leave/Tailcall profiler callbacks. // These are used when complus_JitEltHookEnabled=1 #ifdef TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle, UINT_PTR callerSP) { return; } #else //! TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle) { return; } #endif //! TARGET_AMD64 #endif // PROFILING_SUPPORTED bool Compiler::compShouldThrowOnNoway( #ifdef FEATURE_TRACELOGGING const char* filename, unsigned line #endif ) { #ifdef FEATURE_TRACELOGGING compJitTelemetry.NotifyNowayAssert(filename, line); #endif // In min opts, we don't want the noway assert to go through the exception // path. Instead we want it to just silently go through codegen for // compat reasons. return !opts.MinOpts(); } // ConfigInteger does not offer an option for decimal flags. Any numbers are interpreted as hex. // I could add the decimal option to ConfigInteger or I could write a function to reinterpret this // value as the user intended. unsigned ReinterpretHexAsDecimal(unsigned in) { // ex: in: 0x100 returns: 100 unsigned result = 0; unsigned index = 1; // default value if (in == INT_MAX) { return in; } while (in) { unsigned digit = in % 16; in >>= 4; assert(digit < 10); result += digit * index; index *= 10; } return result; } void Compiler::compInitOptions(JitFlags* jitFlags) { #ifdef UNIX_AMD64_ABI opts.compNeedToAlignFrame = false; #endif // UNIX_AMD64_ABI memset(&opts, 0, sizeof(opts)); if (compIsForInlining()) { // The following flags are lost when inlining. (They are removed in // Compiler::fgInvokeInlineeCompiler().) 
assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE)); assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_TRACK_TRANSITIONS)); } opts.jitFlags = jitFlags; opts.compFlags = CLFLG_MAXOPT; // Default value is for full optimization if (jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE) || jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) || jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { opts.compFlags = CLFLG_MINOPT; } // Don't optimize .cctors (except prejit) or if we're an inlinee else if (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((info.compFlags & FLG_CCTOR) == FLG_CCTOR) && !compIsForInlining()) { opts.compFlags = CLFLG_MINOPT; } // Default value is to generate a blend of size and speed optimizations // opts.compCodeOpt = BLENDED_CODE; // If the EE sets SIZE_OPT or if we are compiling a Class constructor // we will optimize for code size at the expense of speed // if (jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT) || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) { opts.compCodeOpt = SMALL_CODE; } // // If the EE sets SPEED_OPT we will optimize for speed at the expense of code size // else if (jitFlags->IsSet(JitFlags::JIT_FLAG_SPEED_OPT) || (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1) && !jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT))) { opts.compCodeOpt = FAST_CODE; assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT)); } //------------------------------------------------------------------------- opts.compDbgCode = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE); opts.compDbgInfo = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_INFO); opts.compDbgEnC = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC); #ifdef DEBUG opts.compJitAlignLoopAdaptive = JitConfig.JitAlignLoopAdaptive() == 1; opts.compJitAlignLoopBoundary = (unsigned short)JitConfig.JitAlignLoopBoundary(); opts.compJitAlignLoopMinBlockWeight = (unsigned short)JitConfig.JitAlignLoopMinBlockWeight(); opts.compJitAlignLoopForJcc = JitConfig.JitAlignLoopForJcc() == 1; opts.compJitAlignLoopMaxCodeSize = (unsigned short)JitConfig.JitAlignLoopMaxCodeSize(); opts.compJitHideAlignBehindJmp = JitConfig.JitHideAlignBehindJmp() == 1; #else opts.compJitAlignLoopAdaptive = true; opts.compJitAlignLoopBoundary = DEFAULT_ALIGN_LOOP_BOUNDARY; opts.compJitAlignLoopMinBlockWeight = DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT; opts.compJitAlignLoopMaxCodeSize = DEFAULT_MAX_LOOPSIZE_FOR_ALIGN; opts.compJitHideAlignBehindJmp = true; #endif #ifdef TARGET_XARCH if (opts.compJitAlignLoopAdaptive) { // For adaptive alignment, padding limit is equal to the max instruction encoding // size which is 15 bytes. Hence (32 >> 1) - 1 = 15 bytes. opts.compJitAlignPaddingLimit = (opts.compJitAlignLoopBoundary >> 1) - 1; } else { // For non-adaptive alignment, padding limit is 1 less than the alignment boundary // specified. opts.compJitAlignPaddingLimit = opts.compJitAlignLoopBoundary - 1; } #elif TARGET_ARM64 if (opts.compJitAlignLoopAdaptive) { // For adaptive alignment, padding limit is same as specified by the alignment // boundary because all instructions are 4 bytes long. Hence (32 >> 1) = 16 bytes. opts.compJitAlignPaddingLimit = (opts.compJitAlignLoopBoundary >> 1); } else { // For non-adaptive, padding limit is same as specified by the alignment. 
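        // (For example, with a 32 byte alignment boundary this allows up to 32 bytes
        // of padding, versus the 16 bytes allowed by the adaptive case above.)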
opts.compJitAlignPaddingLimit = opts.compJitAlignLoopBoundary; } #endif assert(isPow2(opts.compJitAlignLoopBoundary)); #ifdef TARGET_ARM64 // The minimum encoding size for Arm64 is 4 bytes. assert(opts.compJitAlignLoopBoundary >= 4); #endif #if REGEN_SHORTCUTS || REGEN_CALLPAT // We never want to have debugging enabled when regenerating GC encoding patterns opts.compDbgCode = false; opts.compDbgInfo = false; opts.compDbgEnC = false; #endif compSetProcessor(); #ifdef DEBUG opts.dspOrder = false; // Optionally suppress inliner compiler instance dumping. // if (compIsForInlining()) { if (JitConfig.JitDumpInlinePhases() > 0) { verbose = impInlineInfo->InlinerCompiler->verbose; } else { verbose = false; } } else { verbose = false; codeGen->setVerbose(false); } verboseTrees = verbose && shouldUseVerboseTrees(); verboseSsa = verbose && shouldUseVerboseSsa(); asciiTrees = shouldDumpASCIITrees(); opts.dspDiffable = compIsForInlining() ? impInlineInfo->InlinerCompiler->opts.dspDiffable : false; #endif opts.altJit = false; #if defined(LATE_DISASM) && !defined(DEBUG) // For non-debug builds with the late disassembler built in, we currently always do late disassembly // (we have no way to determine when not to, since we don't have class/method names). // In the DEBUG case, this is initialized to false, below. opts.doLateDisasm = true; #endif #ifdef DEBUG const JitConfigValues::MethodSet* pfAltJit; if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { pfAltJit = &JitConfig.AltJitNgen(); } else { pfAltJit = &JitConfig.AltJit(); } if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { if (pfAltJit->contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.altJit = true; } unsigned altJitLimit = ReinterpretHexAsDecimal(JitConfig.AltJitLimit()); if (altJitLimit > 0 && Compiler::jitTotalMethodCompiled >= altJitLimit) { opts.altJit = false; } } #else // !DEBUG const char* altJitVal; if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { altJitVal = JitConfig.AltJitNgen().list(); } else { altJitVal = JitConfig.AltJit().list(); } if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { // In release mode, you either get all methods or no methods. You must use "*" as the parameter, or we ignore // it. You don't get to give a regular expression of methods to match. // (Partially, this is because we haven't computed and stored the method and class name except in debug, and it // might be expensive to do so.) if ((altJitVal != nullptr) && (strcmp(altJitVal, "*") == 0)) { opts.altJit = true; } } #endif // !DEBUG // Take care of COMPlus_AltJitExcludeAssemblies. if (opts.altJit) { // First, initialize the AltJitExcludeAssemblies list, but only do it once. if (!s_pAltJitExcludeAssembliesListInitialized) { const WCHAR* wszAltJitExcludeAssemblyList = JitConfig.AltJitExcludeAssemblies(); if (wszAltJitExcludeAssemblyList != nullptr) { // NOTE: The Assembly name list is allocated in the process heap, not in the no-release heap, which is // reclaimed // for every compilation. This is ok because we only allocate once, due to the static. s_pAltJitExcludeAssembliesList = new (HostAllocator::getHostAllocator()) AssemblyNamesList2(wszAltJitExcludeAssemblyList, HostAllocator::getHostAllocator()); } s_pAltJitExcludeAssembliesListInitialized = true; } if (s_pAltJitExcludeAssembliesList != nullptr) { // We have an exclusion list. See if this method is in an assembly that is on the list. 
// Note that we check this for every method, since we might inline across modules, and // if the inlinee module is on the list, we don't want to use the altjit for it. const char* methodAssemblyName = info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); if (s_pAltJitExcludeAssembliesList->IsInList(methodAssemblyName)) { opts.altJit = false; } } } #ifdef DEBUG bool altJitConfig = !pfAltJit->isEmpty(); // If we have a non-empty AltJit config then we change all of these other // config values to refer only to the AltJit. Otherwise, a lot of COMPlus_* variables // would apply to both the altjit and the normal JIT, but we only care about // debugging the altjit if the COMPlus_AltJit configuration is set. // if (compIsForImportOnly() && (!altJitConfig || opts.altJit)) { if (JitConfig.JitImportBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { assert(!"JitImportBreak reached"); } } bool verboseDump = false; if (!altJitConfig || opts.altJit) { // We should only enable 'verboseDump' when we are actually compiling a matching method // and not enable it when we are just considering inlining a matching method. // if (!compIsForInlining()) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (JitConfig.NgenDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { verboseDump = true; } unsigned ngenHashDumpVal = (unsigned)JitConfig.NgenHashDump(); if ((ngenHashDumpVal != (DWORD)-1) && (ngenHashDumpVal == info.compMethodHash())) { verboseDump = true; } } else { if (JitConfig.JitDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { verboseDump = true; } unsigned jitHashDumpVal = (unsigned)JitConfig.JitHashDump(); if ((jitHashDumpVal != (DWORD)-1) && (jitHashDumpVal == info.compMethodHash())) { verboseDump = true; } } } } // Optionally suppress dumping Tier0 jit requests. // if (verboseDump && jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { verboseDump = (JitConfig.JitDumpTier0() > 0); } // Optionally suppress dumping except for a specific OSR jit request. // const int dumpAtOSROffset = JitConfig.JitDumpAtOSROffset(); if (verboseDump && (dumpAtOSROffset != -1)) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { verboseDump = (((IL_OFFSET)dumpAtOSROffset) == info.compILEntry); } else { verboseDump = false; } } if (verboseDump) { verbose = true; } #endif // DEBUG #ifdef FEATURE_SIMD // Minimum bar for availing SIMD benefits is SSE2 on AMD64/x86. featureSIMD = jitFlags->IsSet(JitFlags::JIT_FLAG_FEATURE_SIMD); setUsesSIMDTypes(false); #endif // FEATURE_SIMD lvaEnregEHVars = (compEnregLocals() && JitConfig.EnableEHWriteThru()); lvaEnregMultiRegVars = (compEnregLocals() && JitConfig.EnableMultiRegLocals()); if (compIsForImportOnly()) { return; } #if FEATURE_TAILCALL_OPT // By default opportunistic tail call optimization is enabled. // Recognition is done in the importer so this must be set for // inlinees as well. opts.compTailCallOpt = true; #endif // FEATURE_TAILCALL_OPT #if FEATURE_FASTTAILCALL // By default fast tail calls are enabled. 
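    // (The JitConfig.FastTailCalls() check later in this function can turn this back off.)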
opts.compFastTailCalls = true; #endif // FEATURE_FASTTAILCALL // Profile data // fgPgoSchema = nullptr; fgPgoData = nullptr; fgPgoSchemaCount = 0; fgPgoQueryResult = E_FAIL; fgPgoFailReason = nullptr; fgPgoSource = ICorJitInfo::PgoSource::Unknown; if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT)) { fgPgoQueryResult = info.compCompHnd->getPgoInstrumentationResults(info.compMethodHnd, &fgPgoSchema, &fgPgoSchemaCount, &fgPgoData, &fgPgoSource); // a failed result that also has a non-NULL fgPgoSchema // indicates that the ILSize for the method no longer matches // the ILSize for the method when profile data was collected. // // We will discard the IBC data in this case // if (FAILED(fgPgoQueryResult)) { fgPgoFailReason = (fgPgoSchema != nullptr) ? "No matching PGO data" : "No PGO data"; fgPgoData = nullptr; fgPgoSchema = nullptr; } // Optionally, disable use of profile data. // else if (JitConfig.JitDisablePgo() > 0) { fgPgoFailReason = "PGO data available, but JitDisablePgo > 0"; fgPgoQueryResult = E_FAIL; fgPgoData = nullptr; fgPgoSchema = nullptr; fgPgoDisabled = true; } #ifdef DEBUG // Optionally, enable use of profile data for only some methods. // else { static ConfigMethodRange JitEnablePgoRange; JitEnablePgoRange.EnsureInit(JitConfig.JitEnablePgoRange()); // Base this decision on the root method hash, so a method either sees all available // profile data (including that for inlinees), or none of it. // const unsigned hash = impInlineRoot()->info.compMethodHash(); if (!JitEnablePgoRange.Contains(hash)) { fgPgoFailReason = "PGO data available, but method hash NOT within JitEnablePgoRange"; fgPgoQueryResult = E_FAIL; fgPgoData = nullptr; fgPgoSchema = nullptr; fgPgoDisabled = true; } } // A successful result implies a non-NULL fgPgoSchema // if (SUCCEEDED(fgPgoQueryResult)) { assert(fgPgoSchema != nullptr); } // A failed result implies a NULL fgPgoSchema // see implementation of Compiler::fgHaveProfileData() // if (FAILED(fgPgoQueryResult)) { assert(fgPgoSchema == nullptr); } #endif } if (compIsForInlining()) { return; } // The rest of the opts fields that we initialize here // should only be used when we generate code for the method // They should not be used when importing or inlining CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_TAILCALL_OPT opts.compTailCallLoopOpt = true; #endif // FEATURE_TAILCALL_OPT opts.genFPorder = true; opts.genFPopt = true; opts.instrCount = 0; opts.lvRefCount = 0; #ifdef PROFILING_SUPPORTED opts.compJitELTHookEnabled = false; #endif // PROFILING_SUPPORTED #if defined(TARGET_ARM64) // 0 is default: use the appropriate frame type based on the function. opts.compJitSaveFpLrWithCalleeSavedRegisters = 0; #endif // defined(TARGET_ARM64) #ifdef DEBUG opts.dspInstrs = false; opts.dspLines = false; opts.varNames = false; opts.dmpHex = false; opts.disAsm = false; opts.disAsmSpilled = false; opts.disDiffable = false; opts.disAddr = false; opts.disAlignment = false; opts.dspCode = false; opts.dspEHTable = false; opts.dspDebugInfo = false; opts.dspGCtbls = false; opts.disAsm2 = false; opts.dspUnwind = false; opts.compLongAddress = false; opts.optRepeat = false; #ifdef LATE_DISASM opts.doLateDisasm = false; #endif // LATE_DISASM compDebugBreak = false; // If we have a non-empty AltJit config then we change all of these other // config values to refer only to the AltJit. 
// if (!altJitConfig || opts.altJit) { if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if ((JitConfig.NgenOrder() & 1) == 1) { opts.dspOrder = true; } if (JitConfig.NgenGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspGCtbls = true; } if (JitConfig.NgenDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.disAsm = true; } if (JitConfig.NgenDisasm().contains("SPILLED", nullptr, nullptr)) { opts.disAsmSpilled = true; } if (JitConfig.NgenUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspUnwind = true; } if (JitConfig.NgenEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspEHTable = true; } if (JitConfig.NgenDebugDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspDebugInfo = true; } } else { bool disEnabled = true; // Setup assembly name list for disassembly, if not already set up. if (!s_pJitDisasmIncludeAssembliesListInitialized) { const WCHAR* assemblyNameList = JitConfig.JitDisasmAssemblies(); if (assemblyNameList != nullptr) { s_pJitDisasmIncludeAssembliesList = new (HostAllocator::getHostAllocator()) AssemblyNamesList2(assemblyNameList, HostAllocator::getHostAllocator()); } s_pJitDisasmIncludeAssembliesListInitialized = true; } // If we have an assembly name list for disassembly, also check this method's assembly. if (s_pJitDisasmIncludeAssembliesList != nullptr && !s_pJitDisasmIncludeAssembliesList->IsEmpty()) { const char* assemblyName = info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); if (!s_pJitDisasmIncludeAssembliesList->IsInList(assemblyName)) { disEnabled = false; } } if (disEnabled) { if ((JitConfig.JitOrder() & 1) == 1) { opts.dspOrder = true; } if (JitConfig.JitGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspGCtbls = true; } if (JitConfig.JitDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.disAsm = true; } if (JitConfig.JitDisasm().contains("SPILLED", nullptr, nullptr)) { opts.disAsmSpilled = true; } if (JitConfig.JitUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspUnwind = true; } if (JitConfig.JitEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspEHTable = true; } if (JitConfig.JitDebugDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.dspDebugInfo = true; } } } if (opts.disAsm && JitConfig.JitDisasmWithGC()) { opts.disasmWithGC = true; } #ifdef LATE_DISASM if (JitConfig.JitLateDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) opts.doLateDisasm = true; #endif // LATE_DISASM // This one applies to both Ngen/Jit Disasm output: COMPlus_JitDiffableDasm=1 if (JitConfig.DiffableDasm() != 0) { opts.disDiffable = true; opts.dspDiffable = true; } // This one applies to both Ngen/Jit Disasm output: COMPlus_JitDasmWithAddress=1 if (JitConfig.JitDasmWithAddress() != 0) { opts.disAddr = true; } if (JitConfig.JitDasmWithAlignmentBoundaries() != 0) { opts.disAlignment = true; } if (JitConfig.JitLongAddress() != 0) { opts.compLongAddress = true; } if (JitConfig.JitOptRepeat().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.optRepeat = true; } } if (verboseDump) { opts.dspCode 
= true; opts.dspEHTable = true; opts.dspGCtbls = true; opts.disAsm2 = true; opts.dspUnwind = true; verbose = true; verboseTrees = shouldUseVerboseTrees(); verboseSsa = shouldUseVerboseSsa(); codeGen->setVerbose(true); } treesBeforeAfterMorph = (JitConfig.TreesBeforeAfterMorph() == 1); morphNum = 0; // Initialize the morphed-trees counting. expensiveDebugCheckLevel = JitConfig.JitExpensiveDebugCheckLevel(); if (expensiveDebugCheckLevel == 0) { // If we're in a stress mode that modifies the flowgraph, make 1 the default. if (fgStressBBProf() || compStressCompile(STRESS_DO_WHILE_LOOPS, 30)) { expensiveDebugCheckLevel = 1; } } if (verbose) { printf("****** START compiling %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash()); printf("Generating code for %s %s\n", Target::g_tgtPlatformName(), Target::g_tgtCPUName); printf(""); // in our logic this causes a flush } if (JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { assert(!"JitBreak reached"); } unsigned jitHashBreakVal = (unsigned)JitConfig.JitHashBreak(); if ((jitHashBreakVal != (DWORD)-1) && (jitHashBreakVal == info.compMethodHash())) { assert(!"JitHashBreak reached"); } if (verbose || JitConfig.JitDebugBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args) || JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { compDebugBreak = true; } memset(compActiveStressModes, 0, sizeof(compActiveStressModes)); // Read function list, if not already read, and there exists such a list. if (!s_pJitFunctionFileInitialized) { const WCHAR* functionFileName = JitConfig.JitFunctionFile(); if (functionFileName != nullptr) { s_pJitMethodSet = new (HostAllocator::getHostAllocator()) MethodSet(functionFileName, HostAllocator::getHostAllocator()); } s_pJitFunctionFileInitialized = true; } #endif // DEBUG //------------------------------------------------------------------------- #ifdef DEBUG assert(!codeGen->isGCTypeFixed()); opts.compGcChecks = (JitConfig.JitGCChecks() != 0) || compStressCompile(STRESS_GENERIC_VARN, 5); #endif #if defined(DEBUG) && defined(TARGET_XARCH) enum { STACK_CHECK_ON_RETURN = 0x1, STACK_CHECK_ON_CALL = 0x2, STACK_CHECK_ALL = 0x3 }; DWORD dwJitStackChecks = JitConfig.JitStackChecks(); if (compStressCompile(STRESS_GENERIC_VARN, 5)) { dwJitStackChecks = STACK_CHECK_ALL; } opts.compStackCheckOnRet = (dwJitStackChecks & DWORD(STACK_CHECK_ON_RETURN)) != 0; #if defined(TARGET_X86) opts.compStackCheckOnCall = (dwJitStackChecks & DWORD(STACK_CHECK_ON_CALL)) != 0; #endif // defined(TARGET_X86) #endif // defined(DEBUG) && defined(TARGET_XARCH) #if MEASURE_MEM_ALLOC s_dspMemStats = (JitConfig.DisplayMemStats() != 0); #endif #ifdef PROFILING_SUPPORTED opts.compNoPInvokeInlineCB = jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_NO_PINVOKE_INLINE); // Cache the profiler handle if (jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)) { bool hookNeeded; bool indirected; info.compCompHnd->GetProfilingHandle(&hookNeeded, &compProfilerMethHnd, &indirected); compProfilerHookNeeded = !!hookNeeded; compProfilerMethHndIndirected = !!indirected; } else { compProfilerHookNeeded = false; compProfilerMethHnd = nullptr; compProfilerMethHndIndirected = false; } // Honour COMPlus_JitELTHookEnabled or STRESS_PROFILER_CALLBACKS stress mode // only if VM has not asked us to generate profiler hooks in the first place. // That is, override VM only if it hasn't asked for a profiler callback for this method. 
// Don't run this stress mode when pre-JITing, as we would need to emit a relocation // for the call to the fake ELT hook, which wouldn't make sense, as we can't store that // in the pre-JIT image. if (!compProfilerHookNeeded) { if ((JitConfig.JitELTHookEnabled() != 0) || (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && compStressCompile(STRESS_PROFILER_CALLBACKS, 5))) { opts.compJitELTHookEnabled = true; } } // TBD: Exclude PInvoke stubs if (opts.compJitELTHookEnabled) { compProfilerMethHnd = (void*)DummyProfilerELTStub; compProfilerMethHndIndirected = false; } #endif // PROFILING_SUPPORTED #if FEATURE_TAILCALL_OPT const WCHAR* strTailCallOpt = JitConfig.TailCallOpt(); if (strTailCallOpt != nullptr) { opts.compTailCallOpt = (UINT)_wtoi(strTailCallOpt) != 0; } if (JitConfig.TailCallLoopOpt() == 0) { opts.compTailCallLoopOpt = false; } #endif #if FEATURE_FASTTAILCALL if (JitConfig.FastTailCalls() == 0) { opts.compFastTailCalls = false; } #endif // FEATURE_FASTTAILCALL #ifdef CONFIGURABLE_ARM_ABI opts.compUseSoftFP = jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI); unsigned int softFPConfig = opts.compUseSoftFP ? 2 : 1; unsigned int oldSoftFPConfig = InterlockedCompareExchange(&GlobalJitOptions::compUseSoftFPConfigured, softFPConfig, 0); if (oldSoftFPConfig != softFPConfig && oldSoftFPConfig != 0) { // There are no current scenarios where the abi can change during the lifetime of a process // that uses the JIT. If such a change occurs, either compFeatureHfa will need to change to a TLS static // or we will need to have some means to reset the flag safely. NO_WAY("SoftFP ABI setting changed during lifetime of process"); } GlobalJitOptions::compFeatureHfa = !opts.compUseSoftFP; #elif defined(ARM_SOFTFP) && defined(TARGET_ARM) // Armel is unconditionally enabled in the JIT. Verify that the VM side agrees. assert(jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI)); #elif defined(TARGET_ARM) assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SOFTFP_ABI)); #endif // CONFIGURABLE_ARM_ABI opts.compScopeInfo = opts.compDbgInfo; #ifdef LATE_DISASM codeGen->getDisAssembler().disOpenForLateDisAsm(info.compMethodName, info.compClassName, info.compMethodInfo->args.pSig); #endif //------------------------------------------------------------------------- opts.compReloc = jitFlags->IsSet(JitFlags::JIT_FLAG_RELOC); #ifdef DEBUG #if defined(TARGET_XARCH) // Whether encoding of absolute addr as PC-rel offset is enabled opts.compEnablePCRelAddr = (JitConfig.EnablePCRelAddr() != 0); #endif #endif // DEBUG opts.compProcedureSplitting = jitFlags->IsSet(JitFlags::JIT_FLAG_PROCSPLIT); #ifdef TARGET_ARM64 // TODO-ARM64-NYI: enable hot/cold splitting opts.compProcedureSplitting = false; #endif // TARGET_ARM64 #ifdef DEBUG opts.compProcedureSplittingEH = opts.compProcedureSplitting; #endif // DEBUG if (opts.compProcedureSplitting) { // Note that opts.compdbgCode is true under ngen for checked assemblies! opts.compProcedureSplitting = !opts.compDbgCode; #ifdef DEBUG // JitForceProcedureSplitting is used to force procedure splitting on checked assemblies. // This is useful for debugging on a checked build. Note that we still only do procedure // splitting in the zapper. if (JitConfig.JitForceProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplitting = true; } // JitNoProcedureSplitting will always disable procedure splitting. 
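        // Since it is checked after JitForceProcedureSplitting above, it wins if a
        // method happens to match both lists.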
if (JitConfig.JitNoProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplitting = false; } // // JitNoProcedureSplittingEH will disable procedure splitting in functions with EH. if (JitConfig.JitNoProcedureSplittingEH().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { opts.compProcedureSplittingEH = false; } #endif } #ifdef DEBUG // Now, set compMaxUncheckedOffsetForNullObject for STRESS_NULL_OBJECT_CHECK if (compStressCompile(STRESS_NULL_OBJECT_CHECK, 30)) { compMaxUncheckedOffsetForNullObject = (size_t)JitConfig.JitMaxUncheckedOffset(); if (verbose) { printf("STRESS_NULL_OBJECT_CHECK: compMaxUncheckedOffsetForNullObject=0x%X\n", compMaxUncheckedOffsetForNullObject); } } if (verbose) { // If we are compiling for a specific tier, make that very obvious in the output. // Note that we don't expect multiple TIER flags to be set at one time, but there // is nothing preventing that. if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { printf("OPTIONS: Tier-0 compilation (set COMPlus_TieredCompilation=0 to disable)\n"); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1)) { printf("OPTIONS: Tier-1 compilation\n"); } if (compSwitchedToOptimized) { printf("OPTIONS: Tier-0 compilation, switched to FullOpts\n"); } if (compSwitchedToMinOpts) { printf("OPTIONS: Tier-1/FullOpts compilation, switched to MinOpts\n"); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { printf("OPTIONS: OSR variant with entry point 0x%x\n", info.compILEntry); } printf("OPTIONS: compCodeOpt = %s\n", (opts.compCodeOpt == BLENDED_CODE) ? "BLENDED_CODE" : (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE" : (opts.compCodeOpt == FAST_CODE) ? "FAST_CODE" : "UNKNOWN_CODE"); printf("OPTIONS: compDbgCode = %s\n", dspBool(opts.compDbgCode)); printf("OPTIONS: compDbgInfo = %s\n", dspBool(opts.compDbgInfo)); printf("OPTIONS: compDbgEnC = %s\n", dspBool(opts.compDbgEnC)); printf("OPTIONS: compProcedureSplitting = %s\n", dspBool(opts.compProcedureSplitting)); printf("OPTIONS: compProcedureSplittingEH = %s\n", dspBool(opts.compProcedureSplittingEH)); if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) && fgHaveProfileData()) { printf("OPTIONS: optimized using %s profile data\n", pgoSourceToString(fgPgoSource)); } if (fgPgoFailReason != nullptr) { printf("OPTIONS: %s\n", fgPgoFailReason); } if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { printf("OPTIONS: Jit invoked for ngen\n"); } } #endif #ifdef PROFILING_SUPPORTED #ifdef UNIX_AMD64_ABI if (compIsProfilerHookNeeded()) { opts.compNeedToAlignFrame = true; } #endif // UNIX_AMD64_ABI #endif #if defined(DEBUG) && defined(TARGET_ARM64) if ((s_pJitMethodSet == nullptr) || s_pJitMethodSet->IsActiveMethod(info.compFullName, info.compMethodHash())) { opts.compJitSaveFpLrWithCalleeSavedRegisters = JitConfig.JitSaveFpLrWithCalleeSavedRegisters(); } #endif // defined(DEBUG) && defined(TARGET_ARM64) } #ifdef DEBUG bool Compiler::compJitHaltMethod() { /* This method returns true when we use an INS_BREAKPOINT to allow us to step into the generated native code */ /* Note that this these two "Jit" environment variables also work for ngen images */ if (JitConfig.JitHalt().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } /* Use this Hash variant when there are a lot of method with the same name and different signatures */ unsigned fJitHashHaltVal = (unsigned)JitConfig.JitHashHalt(); if ((fJitHashHaltVal != (unsigned)-1) && (fJitHashHaltVal == info.compMethodHash())) { return 
true; } return false; } /***************************************************************************** * Should we use a "stress-mode" for the given stressArea. We have different * areas to allow the areas to be mixed in different combinations in * different methods. * 'weight' indicates how often (as a percentage) the area should be stressed. * It should reflect the usefulness:overhead ratio. */ const LPCWSTR Compiler::s_compStressModeNames[STRESS_COUNT + 1] = { #define STRESS_MODE(mode) W("STRESS_") W(#mode), STRESS_MODES #undef STRESS_MODE }; //------------------------------------------------------------------------ // compStressCompile: determine if a stress mode should be enabled // // Arguments: // stressArea - stress mode to possibly enable // weight - percent of time this mode should be turned on // (range 0 to 100); weight 0 effectively disables // // Returns: // true if this stress mode is enabled // // Notes: // Methods may be excluded from stress via name or hash. // // Particular stress modes may be disabled or forcibly enabled. // // With JitStress=2, some stress modes are enabled regardless of weight; // these modes are the ones after COUNT_VARN in the enumeration. // // For other modes or for nonzero JitStress values, stress will be // enabled selectively for roughly weight% of methods. // bool Compiler::compStressCompile(compStressArea stressArea, unsigned weight) { // This can be called early, before info is fully set up. if ((info.compMethodName == nullptr) || (info.compFullName == nullptr)) { return false; } // Inlinees defer to the root method for stress, so that we can // more easily isolate methods that cause stress failures. if (compIsForInlining()) { return impInlineRoot()->compStressCompile(stressArea, weight); } const bool doStress = compStressCompileHelper(stressArea, weight); if (doStress && !compActiveStressModes[stressArea]) { if (verbose) { printf("\n\n*** JitStress: %ws ***\n\n", s_compStressModeNames[stressArea]); } compActiveStressModes[stressArea] = 1; } return doStress; } //------------------------------------------------------------------------ // compStressCompileHelper: helper to determine if a stress mode should be enabled // // Arguments: // stressArea - stress mode to possibly enable // weight - percent of time this mode should be turned on // (range 0 to 100); weight 0 effectively disables // // Returns: // true if this stress mode is enabled // // Notes: // See compStressCompile // bool Compiler::compStressCompileHelper(compStressArea stressArea, unsigned weight) { if (!bRangeAllowStress) { return false; } if (!JitConfig.JitStressOnly().isEmpty() && !JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return false; } // Does user explicitly prevent using this STRESS_MODE through the command line? const WCHAR* strStressModeNamesNot = JitConfig.JitStressModeNamesNot(); if ((strStressModeNamesNot != nullptr) && (wcsstr(strStressModeNamesNot, s_compStressModeNames[stressArea]) != nullptr)) { return false; } // Does user explicitly set this STRESS_MODE through the command line? const WCHAR* strStressModeNames = JitConfig.JitStressModeNames(); if (strStressModeNames != nullptr) { if (wcsstr(strStressModeNames, s_compStressModeNames[stressArea]) != nullptr) { return true; } // This stress mode name did not match anything in the stress // mode allowlist. If user has requested only enable mode, // don't allow this stress mode to turn on. 
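        // In other words, JitStressModeNamesOnly turns JitStressModeNames into an
        // opt-in filter: unlisted modes are rejected here instead of falling through
        // to the weight-based selection below.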
const bool onlyEnableMode = JitConfig.JitStressModeNamesOnly() != 0; if (onlyEnableMode) { return false; } } // 0: No stress (Except when explicitly set in complus_JitStressModeNames) // !=2: Vary stress. Performance will be slightly/moderately degraded // 2: Check-all stress. Performance will be REALLY horrible const int stressLevel = getJitStressLevel(); assert(weight <= MAX_STRESS_WEIGHT); // Check for boundary conditions if (stressLevel == 0 || weight == 0) { return false; } // Should we allow unlimited stress ? if ((stressArea > STRESS_COUNT_VARN) && (stressLevel == 2)) { return true; } if (weight == MAX_STRESS_WEIGHT) { return true; } // Get a hash which can be compared with 'weight' assert(stressArea != 0); const unsigned hash = (info.compMethodHash() ^ stressArea ^ stressLevel) % MAX_STRESS_WEIGHT; assert(hash < MAX_STRESS_WEIGHT && weight <= MAX_STRESS_WEIGHT); return (hash < weight); } //------------------------------------------------------------------------ // compPromoteFewerStructs: helper to determine if the local // should not be promoted under a stress mode. // // Arguments: // lclNum - local number to test // // Returns: // true if this local should not be promoted. // // Notes: // Reject ~50% of the potential promotions if STRESS_PROMOTE_FEWER_STRUCTS is active. // bool Compiler::compPromoteFewerStructs(unsigned lclNum) { bool rejectThisPromo = false; const bool promoteLess = compStressCompile(STRESS_PROMOTE_FEWER_STRUCTS, 50); if (promoteLess) { rejectThisPromo = (((info.compMethodHash() ^ lclNum) & 1) == 0); } return rejectThisPromo; } #endif // DEBUG void Compiler::compInitDebuggingInfo() { #ifdef DEBUG if (verbose) { printf("*************** In compInitDebuggingInfo() for %s\n", info.compFullName); } #endif /*------------------------------------------------------------------------- * * Get hold of the local variable records, if there are any */ info.compVarScopesCount = 0; if (opts.compScopeInfo) { eeGetVars(); } compInitVarScopeMap(); if (opts.compScopeInfo || opts.compDbgCode) { compInitScopeLists(); } if (opts.compDbgCode && (info.compVarScopesCount > 0)) { /* Create a new empty basic block. fgExtendDbgLifetimes() may add initialization of variables which are in scope right from the start of the (real) first BB (and therefore artificially marked as alive) into this block. */ fgEnsureFirstBBisScratch(); fgNewStmtAtEnd(fgFirstBB, gtNewNothingNode()); JITDUMP("Debuggable code - Add new %s to perform initialization of variables\n", fgFirstBB->dspToString()); } /*------------------------------------------------------------------------- * * Read the stmt-offsets table and the line-number table */ info.compStmtOffsetsImplicit = ICorDebugInfo::NO_BOUNDARIES; // We can only report debug info for EnC at places where the stack is empty. // Actually, at places where there are not live temps. Else, we won't be able // to map between the old and the new versions correctly as we won't have // any info for the live temps. 
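    // The assert below encodes that requirement: when compiling for EnC with debug
    // info, the only implicit boundaries we accept are stack-empty boundaries.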
assert(!opts.compDbgEnC || !opts.compDbgInfo || 0 == (info.compStmtOffsetsImplicit & ~ICorDebugInfo::STACK_EMPTY_BOUNDARIES)); info.compStmtOffsetsCount = 0; if (opts.compDbgInfo) { /* Get hold of the line# records, if there are any */ eeGetStmtOffsets(); #ifdef DEBUG if (verbose) { printf("info.compStmtOffsetsCount = %d\n", info.compStmtOffsetsCount); printf("info.compStmtOffsetsImplicit = %04Xh", info.compStmtOffsetsImplicit); if (info.compStmtOffsetsImplicit) { printf(" ( "); if (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) { printf("STACK_EMPTY "); } if (info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) { printf("NOP "); } if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { printf("CALL_SITE "); } printf(")"); } printf("\n"); IL_OFFSET* pOffs = info.compStmtOffsets; for (unsigned i = 0; i < info.compStmtOffsetsCount; i++, pOffs++) { printf("%02d) IL_%04Xh\n", i, *pOffs); } } #endif } } void Compiler::compSetOptimizationLevel() { bool theMinOptsValue; #pragma warning(suppress : 4101) unsigned jitMinOpts; if (compIsForInlining()) { theMinOptsValue = impInlineInfo->InlinerCompiler->opts.MinOpts(); goto _SetMinOpts; } theMinOptsValue = false; if (opts.compFlags == CLFLG_MINOPT) { JITLOG((LL_INFO100, "CLFLG_MINOPT set for method %s\n", info.compFullName)); theMinOptsValue = true; } #ifdef DEBUG jitMinOpts = JitConfig.JitMinOpts(); if (!theMinOptsValue && (jitMinOpts > 0)) { // jitTotalMethodCompiled does not include the method that is being compiled now, so make +1. unsigned methodCount = Compiler::jitTotalMethodCompiled + 1; unsigned methodCountMask = methodCount & 0xFFF; unsigned kind = (jitMinOpts & 0xF000000) >> 24; switch (kind) { default: if (jitMinOpts <= methodCount) { if (verbose) { printf(" Optimizations disabled by JitMinOpts and methodCount\n"); } theMinOptsValue = true; } break; case 0xD: { unsigned firstMinopts = (jitMinOpts >> 12) & 0xFFF; unsigned secondMinopts = (jitMinOpts >> 0) & 0xFFF; if ((firstMinopts == methodCountMask) || (secondMinopts == methodCountMask)) { if (verbose) { printf("0xD: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; case 0xE: { unsigned startMinopts = (jitMinOpts >> 12) & 0xFFF; unsigned endMinopts = (jitMinOpts >> 0) & 0xFFF; if ((startMinopts <= methodCountMask) && (endMinopts >= methodCountMask)) { if (verbose) { printf("0xE: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; case 0xF: { unsigned bitsZero = (jitMinOpts >> 12) & 0xFFF; unsigned bitsOne = (jitMinOpts >> 0) & 0xFFF; if (((methodCountMask & bitsOne) == bitsOne) && ((~methodCountMask & bitsZero) == bitsZero)) { if (verbose) { printf("0xF: Optimizations disabled by JitMinOpts and methodCountMask\n"); } theMinOptsValue = true; } } break; } } if (!theMinOptsValue) { if (JitConfig.JitMinOptsName().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { theMinOptsValue = true; } } #if 0 // The code in this #if can be used to debug optimization issues according to method hash. // To use, uncomment, rebuild and set environment variables minoptshashlo and minoptshashhi. 
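    // Hypothetical example: with the code below enabled, setting minoptshashlo=20000000
    // and minoptshashhi=2fffffff forces MinOpts only for methods whose hash falls in
    // that (hex) range.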
#ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("minoptshashlo"); unsigned methHashLo = 0; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); char* histr = getenv("minoptshashhi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); if (methHash >= methHashLo && methHash <= methHashHi) { printf("MinOpts for method %s, hash = %08x.\n", info.compFullName, methHash); printf(""); // in our logic this causes a flush theMinOptsValue = true; } } } #endif #endif if (compStressCompile(STRESS_MIN_OPTS, 5)) { theMinOptsValue = true; } // For PREJIT we never drop down to MinOpts // unless unless CLFLG_MINOPT is set else if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if ((unsigned)JitConfig.JitMinOptsCodeSize() < info.compILCodeSize) { JITLOG((LL_INFO10, "IL Code Size exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsInstrCount() < opts.instrCount) { JITLOG((LL_INFO10, "IL instruction count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsBbCount() < fgBBcount) { JITLOG((LL_INFO10, "Basic Block count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvNumCount() < lvaCount) { JITLOG((LL_INFO10, "Local Variable Num count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvRefCount() < opts.lvRefCount) { JITLOG((LL_INFO10, "Local Variable Ref count exceeded, using MinOpts for method %s\n", info.compFullName)); theMinOptsValue = true; } if (theMinOptsValue == true) { JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count " "%3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); if (JitConfig.JitBreakOnMinOpts() != 0) { assert(!"MinOpts enabled"); } } } #else // !DEBUG // Retail check if we should force Minopts due to the complexity of the method // For PREJIT we never drop down to MinOpts // unless unless CLFLG_MINOPT is set if (!theMinOptsValue && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((DEFAULT_MIN_OPTS_CODE_SIZE < info.compILCodeSize) || (DEFAULT_MIN_OPTS_INSTR_COUNT < opts.instrCount) || (DEFAULT_MIN_OPTS_BB_COUNT < fgBBcount) || (DEFAULT_MIN_OPTS_LV_NUM_COUNT < lvaCount) || (DEFAULT_MIN_OPTS_LV_REF_COUNT < opts.lvRefCount))) { theMinOptsValue = true; } #endif // DEBUG JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count %3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); #if 0 // The code in this #if has been useful in debugging loop cloning issues, by // enabling selective enablement of the loop cloning optimization according to // method hash. #ifdef DEBUG if (!theMinOptsValue) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("opthashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); // methHashLo = (unsigned(atoi(lostr)) << 2); // So we don't have to use negative numbers. } char* histr = getenv("opthashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); // methHashHi = (unsigned(atoi(histr)) << 2); // So we don't have to use negative numbers. 
} if (methHash < methHashLo || methHash > methHashHi) { theMinOptsValue = true; } else { printf("Doing optimization in in %s (0x%x).\n", info.compFullName, methHash); } } #endif #endif _SetMinOpts: // Set the MinOpts value opts.SetMinOpts(theMinOptsValue); // Notify the VM if MinOpts is being used when not requested if (theMinOptsValue && !compIsForInlining() && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) && !opts.compDbgCode) { info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_SWITCHED_TO_MIN_OPT); opts.jitFlags->Clear(JitFlags::JIT_FLAG_TIER1); compSwitchedToMinOpts = true; } #ifdef DEBUG if (verbose && !compIsForInlining()) { printf("OPTIONS: opts.MinOpts() == %s\n", opts.MinOpts() ? "true" : "false"); } #endif /* Control the optimizations */ if (opts.OptimizationDisabled()) { opts.compFlags &= ~CLFLG_MAXOPT; opts.compFlags |= CLFLG_MINOPT; } if (!compIsForInlining()) { codeGen->setFramePointerRequired(false); codeGen->setFrameRequired(false); if (opts.OptimizationDisabled()) { codeGen->setFrameRequired(true); } #if !defined(TARGET_AMD64) // The VM sets JitFlags::JIT_FLAG_FRAMED for two reasons: (1) the COMPlus_JitFramed variable is set, or // (2) the function is marked "noinline". The reason for #2 is that people mark functions // noinline to ensure the show up on in a stack walk. But for AMD64, we don't need a frame // pointer for the frame to show up in stack walk. if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_FRAMED)) codeGen->setFrameRequired(true); #endif if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // The JIT doesn't currently support loop alignment for prejitted images. // (The JIT doesn't know the final address of the code, hence // it can't align code based on unknown addresses.) codeGen->SetAlignLoops(false); // loop alignment not supported for prejitted code } else { codeGen->SetAlignLoops(JitConfig.JitAlignLoops() == 1); } } #if TARGET_ARM // A single JitStress=1 Linux ARM32 test fails when we expand virtual calls early // JIT\HardwareIntrinsics\General\Vector128_1\Vector128_1_ro // opts.compExpandCallsEarly = (JitConfig.JitExpandCallsEarly() == 2); #else opts.compExpandCallsEarly = (JitConfig.JitExpandCallsEarly() != 0); #endif fgCanRelocateEHRegions = true; } #ifdef TARGET_ARMARCH // Function compRsvdRegCheck: // given a curState to use for calculating the total frame size // it will return true if the REG_OPT_RSVD should be reserved so // that it can be use to form large offsets when accessing stack // based LclVar including both incoming and out going argument areas. // // The method advances the frame layout state to curState by calling // lvaFrameSize(curState). // bool Compiler::compRsvdRegCheck(FrameLayoutState curState) { // Always do the layout even if returning early. Callers might // depend on us to do the layout. unsigned frameSize = lvaFrameSize(curState); JITDUMP("\n" "compRsvdRegCheck\n" " frame size = %6d\n" " compArgSize = %6d\n", frameSize, compArgSize); if (opts.MinOpts()) { // Have a recovery path in case we fail to reserve REG_OPT_RSVD and go // over the limit of SP and FP offset ranges due to large // temps. JITDUMP(" Returning true (MinOpts)\n\n"); return true; } unsigned calleeSavedRegMaxSz = CALLEE_SAVED_REG_MAXSZ; if (compFloatingPointUsed) { calleeSavedRegMaxSz += CALLEE_SAVED_FLOAT_MAXSZ; } calleeSavedRegMaxSz += REGSIZE_BYTES; // we always push LR. 
See genPushCalleeSavedRegisters noway_assert(frameSize >= calleeSavedRegMaxSz); #if defined(TARGET_ARM64) // TODO-ARM64-CQ: update this! JITDUMP(" Returning true (ARM64)\n\n"); return true; // just always assume we'll need it, for now #else // TARGET_ARM // frame layout: // // ... high addresses ... // frame contents size // ------------------- ------------------------ // inArgs compArgSize (includes prespill) // caller SP ---> // prespill // LR REGSIZE_BYTES // R11 ---> R11 REGSIZE_BYTES // callee saved regs CALLEE_SAVED_REG_MAXSZ (32 bytes) // optional saved fp regs CALLEE_SAVED_FLOAT_MAXSZ (64 bytes) // lclSize // incl. TEMPS MAX_SPILL_TEMP_SIZE // incl. outArgs // SP ---> // ... low addresses ... // // When codeGen->isFramePointerRequired is true, R11 will be established as a frame pointer. // We can then use R11 to access incoming args with positive offsets, and LclVars with // negative offsets. // // In functions with EH, in the non-funclet (or main) region, even though we will have a // frame pointer, we can use SP with positive offsets to access any or all locals or arguments // that we can reach with SP-relative encodings. The funclet region might require the reserved // register, since it must use offsets from R11 to access the parent frame. unsigned maxR11PositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; JITDUMP(" maxR11PositiveEncodingOffset = %6d\n", maxR11PositiveEncodingOffset); // Floating point load/store instructions (VLDR/VSTR) can address up to -0x3FC from R11, but we // don't know if there are either no integer locals, or if we don't need large negative offsets // for the integer locals, so we must use the integer max negative offset, which is a // smaller (absolute value) number. unsigned maxR11NegativeEncodingOffset = 0x00FF; // This is a negative offset from R11. JITDUMP(" maxR11NegativeEncodingOffset = %6d\n", maxR11NegativeEncodingOffset); // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. unsigned maxR11PositiveOffset = compArgSize + (2 * REGSIZE_BYTES) - 1; JITDUMP(" maxR11PositiveOffset = %6d\n", maxR11PositiveOffset); // The value is positive, but represents a negative offset from R11. // frameSize includes callee-saved space for R11 and LR, which are at non-negative offsets from R11 // (+0 and +4, respectively), so don't include those in the max possible negative offset. assert(frameSize >= (2 * REGSIZE_BYTES)); unsigned maxR11NegativeOffset = frameSize - (2 * REGSIZE_BYTES); JITDUMP(" maxR11NegativeOffset = %6d\n", maxR11NegativeOffset); if (codeGen->isFramePointerRequired()) { if (maxR11NegativeOffset > maxR11NegativeEncodingOffset) { JITDUMP(" Returning true (frame required and maxR11NegativeOffset)\n\n"); return true; } if (maxR11PositiveOffset > maxR11PositiveEncodingOffset) { JITDUMP(" Returning true (frame required and maxR11PositiveOffset)\n\n"); return true; } } // Now consider the SP based frame case. Note that we will use SP based offsets to access the stack in R11 based // frames in the non-funclet main code area. unsigned maxSPPositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; JITDUMP(" maxSPPositiveEncodingOffset = %6d\n", maxSPPositiveEncodingOffset); // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. 
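    // The assert that follows keeps the unsigned '- 1' computation below from wrapping
    // in the degenerate case where both sizes are zero.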
assert(compArgSize + frameSize > 0); unsigned maxSPPositiveOffset = compArgSize + frameSize - 1; if (codeGen->isFramePointerUsed()) { // We have a frame pointer, so we can use it to access part of the stack, even if SP can't reach those parts. // We will still generate SP-relative offsets if SP can reach. // First, check that the stack between R11 and SP can be fully reached, either via negative offset from FP // or positive offset from SP. Don't count stored R11 or LR, which are reached from positive offsets from FP. unsigned maxSPLocalsCombinedOffset = frameSize - (2 * REGSIZE_BYTES) - 1; JITDUMP(" maxSPLocalsCombinedOffset = %6d\n", maxSPLocalsCombinedOffset); if (maxSPLocalsCombinedOffset > maxSPPositiveEncodingOffset) { // Can R11 help? unsigned maxRemainingLocalsCombinedOffset = maxSPLocalsCombinedOffset - maxSPPositiveEncodingOffset; JITDUMP(" maxRemainingLocalsCombinedOffset = %6d\n", maxRemainingLocalsCombinedOffset); if (maxRemainingLocalsCombinedOffset > maxR11NegativeEncodingOffset) { JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach entire stack between them)\n\n"); return true; } // Otherwise, yes, we can address the remaining parts of the locals frame with negative offsets from R11. } // Check whether either R11 or SP can access the arguments. if ((maxR11PositiveOffset > maxR11PositiveEncodingOffset) && (maxSPPositiveOffset > maxSPPositiveEncodingOffset)) { JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach all arguments)\n\n"); return true; } } else { if (maxSPPositiveOffset > maxSPPositiveEncodingOffset) { JITDUMP(" Returning true (no frame pointer exists; SP can't reach all of frame)\n\n"); return true; } } // We won't need to reserve REG_OPT_RSVD. // JITDUMP(" Returning false\n\n"); return false; #endif // TARGET_ARM } #endif // TARGET_ARMARCH //------------------------------------------------------------------------ // compGetTieringName: get a string describing tiered compilation settings // for this method // // Arguments: // wantShortName - true if a short name is ok (say for using in file names) // // Returns: // String describing tiering decisions for this method, including cases // where the jit codegen will differ from what the runtime requested. // const char* Compiler::compGetTieringName(bool wantShortName) const { const bool tier0 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0); const bool tier1 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1); assert(!tier0 || !tier1); // We don't expect multiple TIER flags to be set at one time. if (tier0) { return "Tier0"; } else if (tier1) { if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { return "Tier1-OSR"; } else { return "Tier1"; } } else if (opts.OptimizationEnabled()) { if (compSwitchedToOptimized) { return wantShortName ? "Tier0-FullOpts" : "Tier-0 switched to FullOpts"; } else { return "FullOpts"; } } else if (opts.MinOpts()) { if (compSwitchedToMinOpts) { if (compSwitchedToOptimized) { return wantShortName ? "Tier0-FullOpts-MinOpts" : "Tier-0 switched to FullOpts, then to MinOpts"; } else { return wantShortName ? "Tier0-MinOpts" : "Tier-0 switched MinOpts"; } } else { return "MinOpts"; } } else if (opts.compDbgCode) { return "Debug"; } else { return wantShortName ? 
"Unknown" : "Unknown optimization level"; } } //------------------------------------------------------------------------ // compGetStressMessage: get a string describing jitstress capability // for this method // // Returns: // An empty string if stress is not enabled, else a string describing // if this method is subject to stress or is excluded by name or hash. // const char* Compiler::compGetStressMessage() const { // Add note about stress where appropriate const char* stressMessage = ""; #ifdef DEBUG // Is stress enabled via mode name or level? if ((JitConfig.JitStressModeNames() != nullptr) || (getJitStressLevel() > 0)) { // Is the method being jitted excluded from stress via range? if (bRangeAllowStress) { // Or is it excluded via name? if (!JitConfig.JitStressOnly().isEmpty() || !JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { // Not excluded -- stress can happen stressMessage = " JitStress"; } else { stressMessage = " NoJitStress(Only)"; } } else { stressMessage = " NoJitStress(Range)"; } } #endif // DEBUG return stressMessage; } void Compiler::compFunctionTraceStart() { #ifdef DEBUG if (compIsForInlining()) { return; } if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) { LONG newJitNestingLevel = InterlockedIncrement(&Compiler::jitNestingLevel); if (newJitNestingLevel <= 0) { printf("{ Illegal nesting level %d }\n", newJitNestingLevel); } for (LONG i = 0; i < newJitNestingLevel - 1; i++) { printf(" "); } printf("{ Start Jitting Method %4d %s (MethodHash=%08x) %s\n", Compiler::jitTotalMethodCompiled, info.compFullName, info.compMethodHash(), compGetTieringName()); /* } editor brace matching workaround for this printf */ } #endif // DEBUG } void Compiler::compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI) { #ifdef DEBUG assert(!compIsForInlining()); if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) { LONG newJitNestingLevel = InterlockedDecrement(&Compiler::jitNestingLevel); if (newJitNestingLevel < 0) { printf("{ Illegal nesting level %d }\n", newJitNestingLevel); } for (LONG i = 0; i < newJitNestingLevel; i++) { printf(" "); } // Note: that is incorrect if we are compiling several methods at the same time. unsigned methodNumber = Compiler::jitTotalMethodCompiled - 1; /* { editor brace-matching workaround for following printf */ printf("} Jitted Method %4d at" FMT_ADDR "method %s size %08x%s%s\n", methodNumber, DBG_ADDR(methodCodePtr), info.compFullName, methodCodeSize, isNYI ? " NYI" : (compIsForImportOnly() ? " import only" : ""), opts.altJit ? 
" altjit" : ""); } #endif // DEBUG } //------------------------------------------------------------------------ // BeginPhase: begin execution of a phase // // Arguments: // phase - the phase that is about to begin // void Compiler::BeginPhase(Phases phase) { mostRecentlyActivePhase = phase; } //------------------------------------------------------------------------ // EndPhase: finish execution of a phase // // Arguments: // phase - the phase that has just finished // void Compiler::EndPhase(Phases phase) { #if defined(FEATURE_JIT_METHOD_PERF) if (pCompJitTimer != nullptr) { pCompJitTimer->EndPhase(this, phase); } #endif mostRecentlyActivePhase = phase; } //------------------------------------------------------------------------ // compCompile: run phases needed for compilation // // Arguments: // methodCodePtr [OUT] - address of generated code // methodCodeSize [OUT] - size of the generated code (hot + cold setions) // compileFlags [IN] - flags controlling jit behavior // // Notes: // This is the most interesting 'toplevel' function in the JIT. It goes through the operations of // importing, morphing, optimizations and code generation. This is called from the EE through the // code:CILJit::compileMethod function. // // For an overview of the structure of the JIT, see: // https://github.com/dotnet/runtime/blob/main/docs/design/coreclr/jit/ryujit-overview.md // // Also called for inlinees, though they will only be run through the first few phases. // void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { // Prepare for importation // auto preImportPhase = [this]() { if (compIsForInlining()) { // Notify root instance that an inline attempt is about to import IL impInlineRoot()->m_inlineStrategy->NoteImport(); } hashBv::Init(this); VarSetOps::AssignAllowUninitRhs(this, compCurLife, VarSetOps::UninitVal()); // The temp holding the secret stub argument is used by fgImport() when importing the intrinsic. if (info.compPublishStubParam) { assert(lvaStubArgumentVar == BAD_VAR_NUM); lvaStubArgumentVar = lvaGrabTempWithImplicitUse(false DEBUGARG("stub argument")); lvaGetDesc(lvaStubArgumentVar)->lvType = TYP_I_IMPL; // TODO-CQ: there is no need to mark it as doNotEnreg. There are no stores for this local // before codegen so liveness and LSRA mark it as "liveIn" and always allocate a stack slot for it. // However, it would be better to process it like other argument locals and keep it in // a reg for the whole method without spilling to the stack when possible. lvaSetVarDoNotEnregister(lvaStubArgumentVar DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } }; DoPhase(this, PHASE_PRE_IMPORT, preImportPhase); compFunctionTraceStart(); // Incorporate profile data. // // Note: the importer is sensitive to block weights, so this has // to happen before importation. // DoPhase(this, PHASE_INCPROFILE, &Compiler::fgIncorporateProfileData); // If we're going to instrument code, we may need to prepare before // we import. // if (compileFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { DoPhase(this, PHASE_IBCPREP, &Compiler::fgPrepareToInstrumentMethod); } // Import: convert the instrs in each basic block to a tree based intermediate representation // DoPhase(this, PHASE_IMPORTATION, &Compiler::fgImport); // Expand any patchpoints // DoPhase(this, PHASE_PATCHPOINTS, &Compiler::fgTransformPatchpoints); // If instrumenting, add block and class probes. 
// if (compileFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { DoPhase(this, PHASE_IBCINSTR, &Compiler::fgInstrumentMethod); } // Transform indirect calls that require control flow expansion. // DoPhase(this, PHASE_INDXCALL, &Compiler::fgTransformIndirectCalls); // PostImportPhase: cleanup inlinees // auto postImportPhase = [this]() { // If this is a viable inline candidate if (compIsForInlining() && !compDonotInline()) { // Filter out unimported BBs in the inlinee // fgPostImportationCleanup(); // Update type of return spill temp if we have gathered // better info when importing the inlinee, and the return // spill temp is single def. if (fgNeedReturnSpillTemp()) { CORINFO_CLASS_HANDLE retExprClassHnd = impInlineInfo->retExprClassHnd; if (retExprClassHnd != nullptr) { LclVarDsc* returnSpillVarDsc = lvaGetDesc(lvaInlineeReturnSpillTemp); if (returnSpillVarDsc->lvSingleDef) { lvaUpdateClass(lvaInlineeReturnSpillTemp, retExprClassHnd, impInlineInfo->retExprClassHndIsExact); } } } } }; DoPhase(this, PHASE_POST_IMPORT, postImportPhase); // If we're importing for inlining, we're done. if (compIsForInlining()) { #ifdef FEATURE_JIT_METHOD_PERF if (pCompJitTimer != nullptr) { #if MEASURE_CLRAPI_CALLS EndPhase(PHASE_CLR_API); #endif pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, false); } #endif return; } // At this point in the phase list, all the inlinee phases have // been run, and inlinee compiles have exited, so we should only // get this far if we are jitting the root method. noway_assert(!compIsForInlining()); // Maybe the caller was not interested in generating code if (compIsForImportOnly()) { compFunctionTraceEnd(nullptr, 0, false); return; } #if !FEATURE_EH // If we aren't yet supporting EH in a compiler bring-up, remove as many EH handlers as possible, so // we can pass tests that contain try/catch EH, but don't actually throw any exceptions. fgRemoveEH(); #endif // !FEATURE_EH // We could allow ESP frames. Just need to reserve space for // pushing EBP if the method becomes an EBP-frame after an edit. // Note that requiring a EBP Frame disallows double alignment. Thus if we change this // we either have to disallow double alignment for E&C some other way or handle it in EETwain. if (opts.compDbgEnC) { codeGen->setFramePointerRequired(true); // We don't care about localloc right now. If we do support it, // EECodeManager::FixContextForEnC() needs to handle it smartly // in case the localloc was actually executed. // // compLocallocUsed = true; } // Start phases that are broadly called morphing, and includes // global morph, as well as other phases that massage the trees so // that we can generate code out of them. // auto morphInitPhase = [this]() { // Initialize the BlockSet epoch NewBasicBlockEpoch(); fgOutgoingArgTemps = nullptr; // Insert call to class constructor as the first basic block if // we were asked to do so. 
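        // "Asked to do so" here means the initClass query below returns
        // CORINFO_INITCLASS_USE_HELPER for this context.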
if (info.compCompHnd->initClass(nullptr /* field */, nullptr /* method */, impTokenLookupContextHandle /* context */) & CORINFO_INITCLASS_USE_HELPER) { fgEnsureFirstBBisScratch(); fgNewStmtAtBeg(fgFirstBB, fgInitThisClass()); } #ifdef DEBUG if (opts.compGcChecks) { for (unsigned i = 0; i < info.compArgsCount; i++) { if (lvaGetDesc(i)->TypeGet() == TYP_REF) { // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTree* op = gtNewLclvNode(i, TYP_REF); GenTreeCall::Use* args = gtNewCallArgs(op); op = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_VOID, args); fgEnsureFirstBBisScratch(); fgNewStmtAtEnd(fgFirstBB, op); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op); } } } } #endif // DEBUG #if defined(DEBUG) && defined(TARGET_XARCH) if (opts.compStackCheckOnRet) { lvaReturnSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("ReturnSpCheck")); lvaSetVarDoNotEnregister(lvaReturnSpCheck, DoNotEnregisterReason::ReturnSpCheck); lvaGetDesc(lvaReturnSpCheck)->lvType = TYP_I_IMPL; } #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) if (opts.compStackCheckOnCall) { lvaCallSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("CallSpCheck")); lvaGetDesc(lvaCallSpCheck)->lvType = TYP_I_IMPL; } #endif // defined(DEBUG) && defined(TARGET_X86) // Update flow graph after importation. // Removes un-imported blocks, trims EH, and ensures correct OSR entry flow. // fgPostImportationCleanup(); }; DoPhase(this, PHASE_MORPH_INIT, morphInitPhase); #ifdef DEBUG // Inliner could add basic blocks. Check that the flowgraph data is up-to-date fgDebugCheckBBlist(false, false); #endif // DEBUG // Inline callee methods into this root method // DoPhase(this, PHASE_MORPH_INLINE, &Compiler::fgInline); // Record "start" values for post-inlining cycles and elapsed time. RecordStateAtEndOfInlining(); // Transform each GT_ALLOCOBJ node into either an allocation helper call or // local variable allocation on the stack. 
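// (Stack allocation is only attempted when object stack allocation is enabled and we are
// optimizing; otherwise ObjectAllocator::Run() turns every GT_ALLOCOBJ into a helper call.)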
ObjectAllocator objectAllocator(this); // PHASE_ALLOCATE_OBJECTS if (compObjectStackAllocation() && opts.OptimizationEnabled()) { objectAllocator.EnableObjectStackAllocation(); } objectAllocator.Run(); // Add any internal blocks/trees we may need // DoPhase(this, PHASE_MORPH_ADD_INTERNAL, &Compiler::fgAddInternal); // Remove empty try regions // DoPhase(this, PHASE_EMPTY_TRY, &Compiler::fgRemoveEmptyTry); // Remove empty finally regions // DoPhase(this, PHASE_EMPTY_FINALLY, &Compiler::fgRemoveEmptyFinally); // Streamline chains of finally invocations // DoPhase(this, PHASE_MERGE_FINALLY_CHAINS, &Compiler::fgMergeFinallyChains); // Clone code in finallys to reduce overhead for non-exceptional paths // DoPhase(this, PHASE_CLONE_FINALLY, &Compiler::fgCloneFinally); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Update finally target flags after EH optimizations // DoPhase(this, PHASE_UPDATE_FINALLY_FLAGS, &Compiler::fgUpdateFinallyTargetFlags); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #if DEBUG if (lvaEnregEHVars) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("JitEHWTHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitEHWTHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lvaEnregEHVars = false; } else if (dump) { printf("Enregistering EH Vars for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // flush } } if (lvaEnregMultiRegVars) { unsigned methHash = info.compMethodHash(); char* lostr = getenv("JitMultiRegHashLo"); unsigned methHashLo = 0; bool dump = false; if (lostr != nullptr) { sscanf_s(lostr, "%x", &methHashLo); dump = true; } char* histr = getenv("JitMultiRegHashHi"); unsigned methHashHi = UINT32_MAX; if (histr != nullptr) { sscanf_s(histr, "%x", &methHashHi); dump = true; } if (methHash < methHashLo || methHash > methHashHi) { lvaEnregMultiRegVars = false; } else if (dump) { printf("Enregistering MultiReg Vars for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // flush } } #endif // Compute bbNum, bbRefs and bbPreds // // This is the first time full (not cheap) preds will be computed. // And, if we have profile data, we can now check integrity. // // From this point on the flowgraph information such as bbNum, // bbRefs or bbPreds has to be kept updated. // auto computePredsPhase = [this]() { JITDUMP("\nRenumbering the basic blocks for fgComputePred\n"); fgRenumberBlocks(); noway_assert(!fgComputePredsDone); fgComputePreds(); }; DoPhase(this, PHASE_COMPUTE_PREDS, computePredsPhase); // Now that we have pred lists, do some flow-related optimizations // if (opts.OptimizationEnabled()) { // Merge common throw blocks // DoPhase(this, PHASE_MERGE_THROWS, &Compiler::fgTailMergeThrows); // Run an early flow graph simplification pass // auto earlyUpdateFlowGraphPhase = [this]() { constexpr bool doTailDup = false; fgUpdateFlowGraph(doTailDup); }; DoPhase(this, PHASE_EARLY_UPDATE_FLOW_GRAPH, earlyUpdateFlowGraphPhase); } // Promote struct locals // auto promoteStructsPhase = [this]() { // For x64 and ARM64 we need to mark irregular parameters lvaRefCountState = RCS_EARLY; fgResetImplicitByRefRefCount(); fgPromoteStructs(); }; DoPhase(this, PHASE_PROMOTE_STRUCTS, promoteStructsPhase); // Figure out what locals are address-taken. 
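// Locals whose address escapes get marked as address-exposed; later phases must treat such locals
// conservatively (no enregistration, no SSA renaming).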
// DoPhase(this, PHASE_STR_ADRLCL, &Compiler::fgMarkAddressExposedLocals); // Run a simple forward substitution pass. // DoPhase(this, PHASE_FWD_SUB, &Compiler::fgForwardSub); // Apply the type update to implicit byref parameters; also choose (based on address-exposed // analysis) which implicit byref promotions to keep (requires copy to initialize) or discard. // DoPhase(this, PHASE_MORPH_IMPBYREF, &Compiler::fgRetypeImplicitByRefArgs); #ifdef DEBUG // Now that locals have address-taken and implicit byref marked, we can safely apply stress. lvaStressLclFld(); fgStress64RsltMul(); #endif // DEBUG // Morph the trees in all the blocks of the method // auto morphGlobalPhase = [this]() { unsigned prevBBCount = fgBBcount; fgMorphBlocks(); // Fix any LclVar annotations on discarded struct promotion temps for implicit by-ref args fgMarkDemotedImplicitByRefArgs(); lvaRefCountState = RCS_INVALID; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (fgNeedToAddFinallyTargetBits) { // We previously wiped out the BBF_FINALLY_TARGET bits due to some morphing; add them back. fgAddFinallyTargetFlags(); fgNeedToAddFinallyTargetBits = false; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Decide the kind of code we want to generate fgSetOptions(); fgExpandQmarkNodes(); #ifdef DEBUG compCurBB = nullptr; #endif // DEBUG // If we needed to create any new BasicBlocks then renumber the blocks if (fgBBcount > prevBBCount) { fgRenumberBlocks(); } // We can now enable all phase checking activePhaseChecks = PhaseChecks::CHECK_ALL; }; DoPhase(this, PHASE_MORPH_GLOBAL, morphGlobalPhase); // GS security checks for unsafe buffers // auto gsPhase = [this]() { unsigned prevBBCount = fgBBcount; if (getNeedsGSSecurityCookie()) { gsGSChecksInitCookie(); if (compGSReorderStackLayout) { gsCopyShadowParams(); } // If we needed to create any new BasicBlocks then renumber the blocks if (fgBBcount > prevBBCount) { fgRenumberBlocks(); } } else { JITDUMP("No GS security needed\n"); } }; DoPhase(this, PHASE_GS_COOKIE, gsPhase); // Compute the block and edge weights // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS, &Compiler::fgComputeBlockAndEdgeWeights); #if defined(FEATURE_EH_FUNCLETS) // Create funclets from the EH handlers. // DoPhase(this, PHASE_CREATE_FUNCLETS, &Compiler::fgCreateFunclets); #endif // FEATURE_EH_FUNCLETS if (opts.OptimizationEnabled()) { // Invert loops // DoPhase(this, PHASE_INVERT_LOOPS, &Compiler::optInvertLoops); // Optimize block order // DoPhase(this, PHASE_OPTIMIZE_LAYOUT, &Compiler::optOptimizeLayout); // Compute reachability sets and dominators. // DoPhase(this, PHASE_COMPUTE_REACHABILITY, &Compiler::fgComputeReachability); // Scale block weights and mark run rarely blocks. // DoPhase(this, PHASE_SET_BLOCK_WEIGHTS, &Compiler::optSetBlockWeights); // Discover and classify natural loops (e.g. mark iterative loops as such). Also marks loop blocks // and sets bbWeight to the loop nesting levels. // DoPhase(this, PHASE_FIND_LOOPS, &Compiler::optFindLoopsPhase); // Clone loops with optimization opportunities, and choose one based on dynamic condition evaluation. // DoPhase(this, PHASE_CLONE_LOOPS, &Compiler::optCloneLoops); // Unroll loops // DoPhase(this, PHASE_UNROLL_LOOPS, &Compiler::optUnrollLoops); // Clear loop table info that is not used after this point, and might become invalid. 
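// (The per-loop iterator descriptions are not consulted after this point and later transformations
// could invalidate them, so they are dropped rather than maintained.)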
// DoPhase(this, PHASE_CLEAR_LOOP_INFO, &Compiler::optClearLoopIterInfo); } #ifdef DEBUG fgDebugCheckLinks(); #endif // Create the variable table (and compute variable ref counts) // DoPhase(this, PHASE_MARK_LOCAL_VARS, &Compiler::lvaMarkLocalVars); // IMPORTANT, after this point, locals are ref counted. // However, ref counts are not kept incrementally up to date. assert(lvaLocalVarRefCounted()); if (opts.OptimizationEnabled()) { // Optimize boolean conditions // DoPhase(this, PHASE_OPTIMIZE_BOOLS, &Compiler::optOptimizeBools); // optOptimizeBools() might have changed the number of blocks; the dominators/reachability might be bad. } // Figure out the order in which operators are to be evaluated // DoPhase(this, PHASE_FIND_OPER_ORDER, &Compiler::fgFindOperOrder); // Weave the tree lists. Anyone who modifies the tree shapes after // this point is responsible for calling fgSetStmtSeq() to keep the // nodes properly linked. // This can create GC poll calls, and create new BasicBlocks (without updating dominators/reachability). // DoPhase(this, PHASE_SET_BLOCK_ORDER, &Compiler::fgSetBlockOrder); // At this point we know if we are fully interruptible or not if (opts.OptimizationEnabled()) { bool doSsa = true; bool doEarlyProp = true; bool doValueNum = true; bool doLoopHoisting = true; bool doCopyProp = true; bool doBranchOpt = true; bool doAssertionProp = true; bool doRangeAnalysis = true; int iterations = 1; #if defined(OPT_CONFIG) doSsa = (JitConfig.JitDoSsa() != 0); doEarlyProp = doSsa && (JitConfig.JitDoEarlyProp() != 0); doValueNum = doSsa && (JitConfig.JitDoValueNumber() != 0); doLoopHoisting = doValueNum && (JitConfig.JitDoLoopHoisting() != 0); doCopyProp = doValueNum && (JitConfig.JitDoCopyProp() != 0); doBranchOpt = doValueNum && (JitConfig.JitDoRedundantBranchOpts() != 0); doAssertionProp = doValueNum && (JitConfig.JitDoAssertionProp() != 0); doRangeAnalysis = doAssertionProp && (JitConfig.JitDoRangeAnalysis() != 0); if (opts.optRepeat) { iterations = JitConfig.JitOptRepeatCount(); } #endif // defined(OPT_CONFIG) while (iterations > 0) { if (doSsa) { // Build up SSA form for the IR // DoPhase(this, PHASE_BUILD_SSA, &Compiler::fgSsaBuild); } if (doEarlyProp) { // Propagate array length and rewrite getType() method call // DoPhase(this, PHASE_EARLY_PROP, &Compiler::optEarlyProp); } if (doValueNum) { // Value number the trees // DoPhase(this, PHASE_VALUE_NUMBER, &Compiler::fgValueNumber); } if (doLoopHoisting) { // Hoist invariant code out of loops // DoPhase(this, PHASE_HOIST_LOOP_CODE, &Compiler::optHoistLoopCode); } if (doCopyProp) { // Perform VN based copy propagation // DoPhase(this, PHASE_VN_COPY_PROP, &Compiler::optVnCopyProp); } if (doBranchOpt) { DoPhase(this, PHASE_OPTIMIZE_BRANCHES, &Compiler::optRedundantBranches); } // Remove common sub-expressions // DoPhase(this, PHASE_OPTIMIZE_VALNUM_CSES, &Compiler::optOptimizeCSEs); if (doAssertionProp) { // Assertion propagation // DoPhase(this, PHASE_ASSERTION_PROP_MAIN, &Compiler::optAssertionPropMain); } if (doRangeAnalysis) { auto rangePhase = [this]() { RangeCheck rc(this); rc.OptimizeRangeChecks(); }; // Bounds check elimination via range analysis // DoPhase(this, PHASE_OPTIMIZE_INDEX_CHECKS, rangePhase); } if (fgModified) { // update the flowgraph if we modified it during the optimization phase // auto optUpdateFlowGraphPhase = [this]() { constexpr bool doTailDup = false; fgUpdateFlowGraph(doTailDup); }; DoPhase(this, PHASE_OPT_UPDATE_FLOW_GRAPH, optUpdateFlowGraphPhase); // Recompute the edge weight if we have modified the 
flow graph // DoPhase(this, PHASE_COMPUTE_EDGE_WEIGHTS2, &Compiler::fgComputeEdgeWeights); } // Iterate if requested, resetting annotations first. if (--iterations == 0) { break; } ResetOptAnnotations(); RecomputeLoopInfo(); } } // Insert GC Polls DoPhase(this, PHASE_INSERT_GC_POLLS, &Compiler::fgInsertGCPolls); // Determine start of cold region if we are hot/cold splitting // DoPhase(this, PHASE_DETERMINE_FIRST_COLD_BLOCK, &Compiler::fgDetermineFirstColdBlock); #ifdef DEBUG fgDebugCheckLinks(compStressCompile(STRESS_REMORPH_TREES, 50)); // Stash the current estimate of the function's size if necessary. if (verbose) { compSizeEstimate = 0; compCycleEstimate = 0; for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { compSizeEstimate += stmt->GetCostSz(); compCycleEstimate += stmt->GetCostEx(); } } } #endif // rationalize trees Rationalizer rat(this); // PHASE_RATIONALIZE rat.Run(); // Here we do "simple lowering". When the RyuJIT backend works for all // platforms, this will be part of the more general lowering phase. For now, though, we do a separate // pass of "final lowering." We must do this before (final) liveness analysis, because this creates // range check throw blocks, in which the liveness must be correct. // DoPhase(this, PHASE_SIMPLE_LOWERING, &Compiler::fgSimpleLowering); // Enable this to gather statistical data such as // call and register argument info, flowgraph and loop info, etc. compJitStats(); #ifdef TARGET_ARM if (compLocallocUsed) { // We reserve REG_SAVED_LOCALLOC_SP to store SP on entry for stack unwinding codeGen->regSet.rsMaskResvd |= RBM_SAVED_LOCALLOC_SP; } #endif // TARGET_ARM // Assign registers to variables, etc. /////////////////////////////////////////////////////////////////////////////// // Dominator and reachability sets are no longer valid. They haven't been // maintained up to here, and shouldn't be used (unless recomputed). /////////////////////////////////////////////////////////////////////////////// fgDomsComputed = false; // Create LinearScan before Lowering, so that Lowering can call LinearScan methods // for determining whether locals are register candidates and (for xarch) whether // a node is a containable memory op. m_pLinearScan = getLinearScanAllocator(this); // Lower // m_pLowering = new (this, CMK_LSRA) Lowering(this, m_pLinearScan); // PHASE_LOWERING m_pLowering->Run(); if (!compMacOsArm64Abi()) { // Set stack levels; this information is necessary for x86 // but on other platforms it is used only in asserts. // TODO: do not run it in release on other platforms, see https://github.com/dotnet/runtime/issues/42673. StackLevelSetter stackLevelSetter(this); stackLevelSetter.Run(); } // We can not add any new tracked variables after this point. 
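// Freezing the tracked variable count here keeps the tracked-variable indices and the liveness
// sets built during register allocation consistent through the remaining backend phases.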
lvaTrackedFixed = true; // Now that lowering is completed we can proceed to perform register allocation // auto linearScanPhase = [this]() { m_pLinearScan->doLinearScan(); }; DoPhase(this, PHASE_LINEAR_SCAN, linearScanPhase); // Copied from rpPredictRegUse() SetFullPtrRegMapRequired(codeGen->GetInterruptible() || !codeGen->isFramePointerUsed()); #if FEATURE_LOOP_ALIGN // Place loop alignment instructions DoPhase(this, PHASE_ALIGN_LOOPS, &Compiler::placeLoopAlignInstructions); #endif // Generate code codeGen->genGenerateCode(methodCodePtr, methodCodeSize); #if TRACK_LSRA_STATS if (JitConfig.DisplayLsraStats() == 2) { m_pLinearScan->dumpLsraStatsCsv(jitstdout); } #endif // TRACK_LSRA_STATS // We're done -- set the active phase to the last phase // (which isn't really a phase) mostRecentlyActivePhase = PHASE_POST_EMIT; #ifdef FEATURE_JIT_METHOD_PERF if (pCompJitTimer) { #if MEASURE_CLRAPI_CALLS EndPhase(PHASE_CLR_API); #else EndPhase(PHASE_POST_EMIT); #endif pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, true); } #endif // Generate PatchpointInfo generatePatchpointInfo(); RecordStateAtEndOfCompilation(); #ifdef FEATURE_TRACELOGGING compJitTelemetry.NotifyEndOfCompilation(); #endif #if defined(DEBUG) ++Compiler::jitTotalMethodCompiled; #endif // defined(DEBUG) compFunctionTraceEnd(*methodCodePtr, *methodCodeSize, false); JITDUMP("Method code size: %d\n", (unsigned)(*methodCodeSize)); #if FUNC_INFO_LOGGING if (compJitFuncInfoFile != nullptr) { assert(!compIsForInlining()); #ifdef DEBUG // We only have access to info.compFullName in DEBUG builds. fprintf(compJitFuncInfoFile, "%s\n", info.compFullName); #elif FEATURE_SIMD fprintf(compJitFuncInfoFile, " %s\n", eeGetMethodFullName(info.compMethodHnd)); #endif fprintf(compJitFuncInfoFile, ""); // in our logic this causes a flush } #endif // FUNC_INFO_LOGGING } #if FEATURE_LOOP_ALIGN //------------------------------------------------------------------------ // placeLoopAlignInstructions: Iterate over all the blocks and determine // the best position to place the 'align' instruction. Inserting 'align' // instructions after an unconditional branch is preferred over inserting // in the block before the loop. In case there are multiple blocks // having 'jmp', the one that has lower weight is preferred. // If the block having 'jmp' is hotter than the block before the loop, // the align will still be placed after 'jmp' because the processor should // be smart enough to not fetch extra instruction beyond jmp. // void Compiler::placeLoopAlignInstructions() { if (loopAlignCandidates == 0) { return; } int loopsToProcess = loopAlignCandidates; JITDUMP("Inside placeLoopAlignInstructions for %d loops.\n", loopAlignCandidates); // Add align only if there were any loops that needed alignment weight_t minBlockSoFar = BB_MAX_WEIGHT; BasicBlock* bbHavingAlign = nullptr; BasicBlock::loopNumber currentAlignedLoopNum = BasicBlock::NOT_IN_LOOP; if ((fgFirstBB != nullptr) && fgFirstBB->isLoopAlign()) { // Adding align instruction in prolog is not supported // hence just remove that loop from our list. loopsToProcess--; } for (BasicBlock* const block : Blocks()) { if (currentAlignedLoopNum != BasicBlock::NOT_IN_LOOP) { // We've been processing blocks within an aligned loop. Are we out of that loop now? 
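// (If this block's natural loop number differs from the aligned loop we were in, we have left that
// loop and can again consider later unconditional 'jmp' blocks as alignment placement points.)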
if (currentAlignedLoopNum != block->bbNatLoopNum) { currentAlignedLoopNum = BasicBlock::NOT_IN_LOOP; } } // If there is a unconditional jump (which is not part of callf/always pair) if (opts.compJitHideAlignBehindJmp && (block->bbJumpKind == BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail()) { // Track the lower weight blocks if (block->bbWeight < minBlockSoFar) { if (currentAlignedLoopNum == BasicBlock::NOT_IN_LOOP) { // Ok to insert align instruction in this block because it is not part of any aligned loop. minBlockSoFar = block->bbWeight; bbHavingAlign = block; JITDUMP(FMT_BB ", bbWeight=" FMT_WT " ends with unconditional 'jmp' \n", block->bbNum, block->bbWeight); } } } if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign())) { // If jmp was not found, then block before the loop start is where align instruction will be added. if (bbHavingAlign == nullptr) { bbHavingAlign = block; JITDUMP("Marking " FMT_BB " before the loop with BBF_HAS_ALIGN for loop at " FMT_BB "\n", block->bbNum, block->bbNext->bbNum); } else { JITDUMP("Marking " FMT_BB " that ends with unconditional jump with BBF_HAS_ALIGN for loop at " FMT_BB "\n", bbHavingAlign->bbNum, block->bbNext->bbNum); } bbHavingAlign->bbFlags |= BBF_HAS_ALIGN; minBlockSoFar = BB_MAX_WEIGHT; bbHavingAlign = nullptr; currentAlignedLoopNum = block->bbNext->bbNatLoopNum; if (--loopsToProcess == 0) { break; } } } assert(loopsToProcess == 0); } #endif //------------------------------------------------------------------------ // generatePatchpointInfo: allocate and fill in patchpoint info data, // and report it to the VM // void Compiler::generatePatchpointInfo() { if (!doesMethodHavePatchpoints() && !doesMethodHavePartialCompilationPatchpoints()) { // Nothing to report return; } // Patchpoints are only found in Tier0 code, which is unoptimized, and so // should always have frame pointer. assert(codeGen->isFramePointerUsed()); // Allocate patchpoint info storage from runtime, and fill in initial bits of data. const unsigned patchpointInfoSize = PatchpointInfo::ComputeSize(info.compLocalsCount); PatchpointInfo* const patchpointInfo = (PatchpointInfo*)info.compCompHnd->allocateArray(patchpointInfoSize); // Patchpoint offsets always refer to "virtual frame offsets". // // For x64 this falls out because Tier0 frames are always FP frames, and so the FP-relative // offset is what we want. // // For arm64, if the frame pointer is not at the top of the frame, we need to adjust the // offset. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // We add +TARGET_POINTER_SIZE here is to account for the slot that Jit_Patchpoint // creates when it simulates calling the OSR method (the "pseudo return address" slot). // This is effectively a new slot at the bottom of the Tier0 frame. // const int totalFrameSize = codeGen->genTotalFrameSize() + TARGET_POINTER_SIZE; const int offsetAdjust = 0; #elif defined(TARGET_ARM64) // SP is not manipulated by calls so no frame size adjustment needed. // Local Offsets may need adjusting, if FP is at bottom of frame. // const int totalFrameSize = codeGen->genTotalFrameSize(); const int offsetAdjust = codeGen->genSPtoFPdelta() - totalFrameSize; #else NYI("patchpoint info generation"); const int offsetAdjust = 0; const int totalFrameSize = 0; #endif patchpointInfo->Initialize(info.compLocalsCount, totalFrameSize); JITDUMP("--OSR--- Total Frame Size %d, local offset adjust is %d\n", patchpointInfo->TotalFrameSize(), offsetAdjust); // We record offsets for all the "locals" here. 
Could restrict // this to just the IL locals with some extra logic, and save a bit of space, // but would need to adjust all consumers, too. for (unsigned lclNum = 0; lclNum < info.compLocalsCount; lclNum++) { LclVarDsc* const varDsc = lvaGetDesc(lclNum); // We expect all these to have stack homes, and be FP relative assert(varDsc->lvOnFrame); assert(varDsc->lvFramePointerBased); // Record FramePtr relative offset (no localloc yet) patchpointInfo->SetOffset(lclNum, varDsc->GetStackOffset() + offsetAdjust); // Note if IL stream contained an address-of that potentially leads to exposure. // This bit of IL may be skipped by OSR partial importation. if (varDsc->lvHasLdAddrOp) { patchpointInfo->SetIsExposed(lclNum); } JITDUMP("--OSR-- V%02u is at virtual offset %d%s\n", lclNum, patchpointInfo->Offset(lclNum), patchpointInfo->IsExposed(lclNum) ? " (exposed)" : ""); } // Special offsets // if (lvaReportParamTypeArg()) { const int offset = lvaCachedGenericContextArgOffset(); patchpointInfo->SetGenericContextArgOffset(offset + offsetAdjust); JITDUMP("--OSR-- cached generic context virtual offset is %d\n", patchpointInfo->GenericContextArgOffset()); } if (lvaKeepAliveAndReportThis()) { const int offset = lvaCachedGenericContextArgOffset(); patchpointInfo->SetKeptAliveThisOffset(offset + offsetAdjust); JITDUMP("--OSR-- kept-alive this virtual offset is %d\n", patchpointInfo->KeptAliveThisOffset()); } if (compGSReorderStackLayout) { assert(lvaGSSecurityCookie != BAD_VAR_NUM); LclVarDsc* const varDsc = lvaGetDesc(lvaGSSecurityCookie); patchpointInfo->SetSecurityCookieOffset(varDsc->GetStackOffset() + offsetAdjust); JITDUMP("--OSR-- security cookie V%02u virtual offset is %d\n", lvaGSSecurityCookie, patchpointInfo->SecurityCookieOffset()); } if (lvaMonAcquired != BAD_VAR_NUM) { LclVarDsc* const varDsc = lvaGetDesc(lvaMonAcquired); patchpointInfo->SetMonitorAcquiredOffset(varDsc->GetStackOffset() + offsetAdjust); JITDUMP("--OSR-- monitor acquired V%02u virtual offset is %d\n", lvaMonAcquired, patchpointInfo->MonitorAcquiredOffset()); } #if defined(TARGET_AMD64) // Record callee save registers. // Currently only needed for x64. // regMaskTP rsPushRegs = codeGen->regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; rsPushRegs |= RBM_FPBASE; patchpointInfo->SetCalleeSaveRegisters((uint64_t)rsPushRegs); JITDUMP("--OSR-- Tier0 callee saves: "); JITDUMPEXEC(dspRegMask((regMaskTP)patchpointInfo->CalleeSaveRegisters())); JITDUMP("\n"); #endif // Register this with the runtime. info.compCompHnd->setPatchpointInfo(patchpointInfo); } //------------------------------------------------------------------------ // ResetOptAnnotations: Clear annotations produced during global optimizations. // // Notes: // The intent of this method is to clear any information typically assumed // to be set only once; it is used between iterations when JitOptRepeat is // in effect. void Compiler::ResetOptAnnotations() { assert(opts.optRepeat); assert(JitConfig.JitOptRepeatCount() > 0); fgResetForSsa(); vnStore = nullptr; m_opAsgnVarDefSsaNums = nullptr; m_blockToEHPreds = nullptr; fgSsaPassesCompleted = 0; fgVNPassesCompleted = 0; for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { for (GenTree* const tree : stmt->TreeList()) { tree->ClearVN(); tree->ClearAssertion(); tree->gtCSEnum = NO_CSE; } } } } //------------------------------------------------------------------------ // RecomputeLoopInfo: Recompute loop annotations between opt-repeat iterations. 
// // Notes: // The intent of this method is to update loop structure annotations, and those // they depend on; these annotations may have become stale during optimization, // and need to be up-to-date before running another iteration of optimizations. // void Compiler::RecomputeLoopInfo() { assert(opts.optRepeat); assert(JitConfig.JitOptRepeatCount() > 0); // Recompute reachability sets, dominators, and loops. optResetLoopInfo(); fgDomsComputed = false; fgComputeReachability(); optSetBlockWeights(); // Rebuild the loop tree annotations themselves optFindLoops(); } /*****************************************************************************/ void Compiler::ProcessShutdownWork(ICorStaticInfo* statInfo) { } /*****************************************************************************/ #ifdef DEBUG void* forceFrameJIT; // used to force to frame &useful for fastchecked debugging bool Compiler::skipMethod() { static ConfigMethodRange fJitRange; fJitRange.EnsureInit(JitConfig.JitRange()); assert(!fJitRange.Error()); // Normally JitConfig.JitRange() is null, we don't want to skip // jitting any methods. // // So, the logic below relies on the fact that a null range string // passed to ConfigMethodRange represents the set of all methods. if (!fJitRange.Contains(info.compMethodHash())) { return true; } if (JitConfig.JitExclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } if (!JitConfig.JitInclude().isEmpty() && !JitConfig.JitInclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) { return true; } return false; } #endif /*****************************************************************************/ int Compiler::compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { // compInit should have set these already. noway_assert(info.compMethodInfo != nullptr); noway_assert(info.compCompHnd != nullptr); noway_assert(info.compMethodHnd != nullptr); #ifdef FEATURE_JIT_METHOD_PERF static bool checkedForJitTimeLog = false; pCompJitTimer = nullptr; if (!checkedForJitTimeLog) { // Call into VM to get the config strings. FEATURE_JIT_METHOD_PERF is enabled for // retail builds. Do not call the regular Config helper here as it would pull // in a copy of the config parser into the clrjit.dll. InterlockedCompareExchangeT(&Compiler::compJitTimeLogFilename, (LPCWSTR)info.compCompHnd->getJitTimeLogFilename(), NULL); // At a process or module boundary clear the file and start afresh. JitTimer::PrintCsvHeader(); checkedForJitTimeLog = true; } if ((Compiler::compJitTimeLogFilename != nullptr) || (JitTimeLogCsv() != nullptr)) { pCompJitTimer = JitTimer::Create(this, info.compMethodInfo->ILCodeSize); } #endif // FEATURE_JIT_METHOD_PERF #ifdef DEBUG Compiler* me = this; forceFrameJIT = (void*)&me; // let us see the this pointer in fastchecked build // set this early so we can use it without relying on random memory values verbose = compIsForInlining() ? 
impInlineInfo->InlinerCompiler->verbose : false; #endif #if FUNC_INFO_LOGGING LPCWSTR tmpJitFuncInfoFilename = JitConfig.JitFuncInfoFile(); if (tmpJitFuncInfoFilename != nullptr) { LPCWSTR oldFuncInfoFileName = InterlockedCompareExchangeT(&compJitFuncInfoFilename, tmpJitFuncInfoFilename, NULL); if (oldFuncInfoFileName == nullptr) { assert(compJitFuncInfoFile == nullptr); compJitFuncInfoFile = _wfopen(compJitFuncInfoFilename, W("a")); if (compJitFuncInfoFile == nullptr) { #if defined(DEBUG) && !defined(HOST_UNIX) // no 'perror' in the PAL perror("Failed to open JitFuncInfoLogFile"); #endif // defined(DEBUG) && !defined(HOST_UNIX) } } } #endif // FUNC_INFO_LOGGING // if (s_compMethodsCount==0) setvbuf(jitstdout, NULL, _IONBF, 0); if (compIsForInlining()) { compileFlags->Clear(JitFlags::JIT_FLAG_OSR); info.compILEntry = 0; info.compPatchpointInfo = nullptr; } else if (compileFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { // Fetch OSR info from the runtime info.compPatchpointInfo = info.compCompHnd->getOSRInfo(&info.compILEntry); assert(info.compPatchpointInfo != nullptr); } #if defined(TARGET_ARM64) compFrameInfo = {0}; #endif virtualStubParamInfo = new (this, CMK_Unknown) VirtualStubParamInfo(IsTargetAbi(CORINFO_CORERT_ABI)); // compMatchedVM is set to true if both CPU/ABI and OS are matching the execution engine requirements // // Do we have a matched VM? Or are we "abusing" the VM to help us do JIT work (such as using an x86 native VM // with an ARM-targeting "altjit"). // Match CPU/ABI for compMatchedVM info.compMatchedVM = IMAGE_FILE_MACHINE_TARGET == info.compCompHnd->getExpectedTargetArchitecture(); // Match OS for compMatchedVM CORINFO_EE_INFO* eeInfo = eeGetEEInfo(); #ifdef TARGET_OS_RUNTIMEDETERMINED noway_assert(TargetOS::OSSettingConfigured); #endif if (TargetOS::IsMacOS) { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_MACOS); } else if (TargetOS::IsUnix) { if (TargetArchitecture::IsX64) { // MacOS x64 uses the Unix jit variant in crossgen2, not a special jit info.compMatchedVM = info.compMatchedVM && ((eeInfo->osType == CORINFO_UNIX) || (eeInfo->osType == CORINFO_MACOS)); } else { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_UNIX); } } else if (TargetOS::IsWindows) { info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_WINNT); } // If we are not compiling for a matched VM, then we are getting JIT flags that don't match our target // architecture. The two main examples here are an ARM targeting altjit hosted on x86 and an ARM64 // targeting altjit hosted on x64. (Though with cross-bitness work, the host doesn't necessarily need // to be of the same bitness.) In these cases, we need to fix up the JIT flags to be appropriate for // the target, as the VM's expected target may overlap bit flags with different meaning to our target. // Note that it might be better to do this immediately when setting the JIT flags in CILJit::compileMethod() // (when JitFlags::SetFromFlags() is called), but this is close enough. (To move this logic to // CILJit::compileMethod() would require moving the info.compMatchedVM computation there as well.) if (!info.compMatchedVM) { #if defined(TARGET_ARM) // Currently nothing needs to be done. There are no ARM flags that conflict with other flags. #endif // defined(TARGET_ARM) #if defined(TARGET_ARM64) // The x86/x64 architecture capabilities flags overlap with the ARM64 ones. Set a reasonable architecture // target default. 
Currently this is disabling all ARM64 architecture features except FP and SIMD, but this // should be altered to possibly enable all of them, when they are known to all work. CORINFO_InstructionSetFlags defaultArm64Flags; defaultArm64Flags.AddInstructionSet(InstructionSet_ArmBase); defaultArm64Flags.AddInstructionSet(InstructionSet_AdvSimd); defaultArm64Flags.Set64BitInstructionSetVariants(); compileFlags->SetInstructionSetFlags(defaultArm64Flags); #endif // defined(TARGET_ARM64) } compMaxUncheckedOffsetForNullObject = eeGetEEInfo()->maxUncheckedOffsetForNullObject; // Set the context for token lookup. if (compIsForInlining()) { impTokenLookupContextHandle = impInlineInfo->tokenLookupContextHandle; assert(impInlineInfo->inlineCandidateInfo->clsHandle == info.compCompHnd->getMethodClass(info.compMethodHnd)); info.compClassHnd = impInlineInfo->inlineCandidateInfo->clsHandle; assert(impInlineInfo->inlineCandidateInfo->clsAttr == info.compCompHnd->getClassAttribs(info.compClassHnd)); // printf("%x != %x\n", impInlineInfo->inlineCandidateInfo->clsAttr, // info.compCompHnd->getClassAttribs(info.compClassHnd)); info.compClassAttr = impInlineInfo->inlineCandidateInfo->clsAttr; } else { impTokenLookupContextHandle = METHOD_BEING_COMPILED_CONTEXT(); info.compClassHnd = info.compCompHnd->getMethodClass(info.compMethodHnd); info.compClassAttr = info.compCompHnd->getClassAttribs(info.compClassHnd); } #ifdef DEBUG if (JitConfig.EnableExtraSuperPmiQueries()) { // This call to getClassModule/getModuleAssembly/getAssemblyName fails in crossgen2 due to these // APIs being unimplemented. So disable this extra info for pre-jit mode. See // https://github.com/dotnet/runtime/issues/48888. // // Ditto for some of the class name queries for generic params. // if (!compileFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // Get the assembly name, to aid finding any particular SuperPMI method context function (void)info.compCompHnd->getAssemblyName( info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); // Fetch class names for the method's generic parameters. // CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(info.compMethodHnd, &sig, nullptr); const unsigned classInst = sig.sigInst.classInstCount; if (classInst > 0) { for (unsigned i = 0; i < classInst; i++) { eeGetClassName(sig.sigInst.classInst[i]); } } const unsigned methodInst = sig.sigInst.methInstCount; if (methodInst > 0) { for (unsigned i = 0; i < methodInst; i++) { eeGetClassName(sig.sigInst.methInst[i]); } } } } #endif // DEBUG info.compProfilerCallback = false; // Assume false until we are told to hook this method. #ifdef DEBUG if (!compIsForInlining()) { JitTls::GetLogEnv()->setCompiler(this); } // Have we been told to be more selective in our Jitting? 
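// skipMethod() consults the DEBUG-only JitRange, JitExclude and JitInclude settings, which let a
// developer limit jitting to a range of method hashes or to particular method/class names
// (supplied through the usual JIT config mechanism, e.g. the COMPlus_/DOTNET_ environment prefix).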
if (skipMethod()) { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_MARKED_AS_SKIPPED); } return CORJIT_SKIPPED; } #endif // DEBUG // Set this before the first 'BADCODE' // Skip verification where possible assert(compileFlags->IsSet(JitFlags::JIT_FLAG_SKIP_VERIFICATION)); /* Setup an error trap */ struct Param { Compiler* pThis; CORINFO_MODULE_HANDLE classPtr; COMP_HANDLE compHnd; CORINFO_METHOD_INFO* methodInfo; void** methodCodePtr; uint32_t* methodCodeSize; JitFlags* compileFlags; int result; } param; param.pThis = this; param.classPtr = classPtr; param.compHnd = info.compCompHnd; param.methodInfo = info.compMethodInfo; param.methodCodePtr = methodCodePtr; param.methodCodeSize = methodCodeSize; param.compileFlags = compileFlags; param.result = CORJIT_INTERNALERROR; setErrorTrap(info.compCompHnd, Param*, pParam, &param) // ERROR TRAP: Start normal block { pParam->result = pParam->pThis->compCompileHelper(pParam->classPtr, pParam->compHnd, pParam->methodInfo, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); } finallyErrorTrap() // ERROR TRAP: The following block handles errors { /* Cleanup */ if (compIsForInlining()) { goto DoneCleanUp; } /* Tell the emitter that we're done with this function */ GetEmitter()->emitEndCG(); DoneCleanUp: compDone(); } endErrorTrap() // ERROR TRAP: End return param.result; } #if defined(DEBUG) || defined(INLINE_DATA) //------------------------------------------------------------------------ // compMethodHash: get hash code for currently jitted method // // Returns: // Hash based on method's full name // unsigned Compiler::Info::compMethodHash() const { if (compMethodHashPrivate == 0) { // compMethodHashPrivate = compCompHnd->getMethodHash(compMethodHnd); assert(compFullName != nullptr); assert(*compFullName != 0); COUNT_T hash = HashStringA(compFullName); // Use compFullName to generate the hash, as it contains the signature // and return type compMethodHashPrivate = hash; } return compMethodHashPrivate; } //------------------------------------------------------------------------ // compMethodHash: get hash code for specified method // // Arguments: // methodHnd - method of interest // // Returns: // Hash based on method's full name // unsigned Compiler::compMethodHash(CORINFO_METHOD_HANDLE methodHnd) { // If this is the root method, delegate to the caching version // if (methodHnd == info.compMethodHnd) { return info.compMethodHash(); } // Else compute from scratch. Might consider caching this too. 
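// The fallback hashes the callee's full name when one is available, matching how the root method's
// hash is computed above, and otherwise uses the VM-provided method hash.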
// unsigned methodHash = 0; const char* calleeName = eeGetMethodFullName(methodHnd); if (calleeName != nullptr) { methodHash = HashStringA(calleeName); } else { methodHash = info.compCompHnd->getMethodHash(methodHnd); } return methodHash; } #endif // defined(DEBUG) || defined(INLINE_DATA) void Compiler::compCompileFinish() { #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS genMethodCnt++; #endif #if MEASURE_MEM_ALLOC { compArenaAllocator->finishMemStats(); memAllocHist.record((unsigned)((compArenaAllocator->getTotalBytesAllocated() + 1023) / 1024)); memUsedHist.record((unsigned)((compArenaAllocator->getTotalBytesUsed() + 1023) / 1024)); } #ifdef DEBUG if (s_dspMemStats || verbose) { printf("\nAllocations for %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash()); compArenaAllocator->dumpMemStats(jitstdout); } #endif // DEBUG #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS AddLoopHoistStats(); #endif // LOOP_HOIST_STATS #if MEASURE_NODE_SIZE genTreeNcntHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeCnt)); genTreeNsizHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeSize)); #endif #if defined(DEBUG) // Small methods should fit in ArenaAllocator::getDefaultPageSize(), or else // we should bump up ArenaAllocator::getDefaultPageSize() if ((info.compILCodeSize <= 32) && // Is it a reasonably small method? (info.compNativeCodeSize < 512) && // Some trivial methods generate huge native code. eg. pushing a single huge // struct (impInlinedCodeSize <= 128) && // Is the the inlining reasonably bounded? // Small methods cannot meaningfully have a big number of locals // or arguments. We always track arguments at the start of // the prolog which requires memory (info.compLocalsCount <= 32) && (!opts.MinOpts()) && // We may have too many local variables, etc (getJitStressLevel() == 0) && // We need extra memory for stress !opts.optRepeat && // We need extra memory to repeat opts !compArenaAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for // DirectAlloc // Factor of 2x is because data-structures are bigger under DEBUG (compArenaAllocator->getTotalBytesAllocated() > (2 * ArenaAllocator::getDefaultPageSize())) && // RyuJIT backend needs memory tuning! TODO-Cleanup: remove this case when memory tuning is complete. (compArenaAllocator->getTotalBytesAllocated() > (10 * ArenaAllocator::getDefaultPageSize())) && !verbose) // We allocate lots of memory to convert sets to strings for JitDump { genSmallMethodsNeedingExtraMemoryCnt++; // Less than 1% of all methods should run into this. // We cannot be more strict as there are always degenerate cases where we // would need extra memory (like huge structs as locals - see lvaSetStruct()). 
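// (The assert below is only meaningful once enough methods have been jitted; it skips the check
// until at least 500 methods have been compiled.)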
assert((genMethodCnt < 500) || (genSmallMethodsNeedingExtraMemoryCnt < (genMethodCnt / 100))); } #endif // DEBUG #if defined(DEBUG) || defined(INLINE_DATA) m_inlineStrategy->DumpData(); if (JitConfig.JitInlineDumpXmlFile() != nullptr) { FILE* file = _wfopen(JitConfig.JitInlineDumpXmlFile(), W("a")); if (file != nullptr) { m_inlineStrategy->DumpXml(file); fclose(file); } else { m_inlineStrategy->DumpXml(); } } else { m_inlineStrategy->DumpXml(); } #endif #ifdef DEBUG if (opts.dspOrder) { // mdMethodDef __stdcall CEEInfo::getMethodDefFromMethod(CORINFO_METHOD_HANDLE hMethod) mdMethodDef currentMethodToken = info.compCompHnd->getMethodDefFromMethod(info.compMethodHnd); static bool headerPrinted = false; if (!headerPrinted) { // clang-format off headerPrinted = true; printf(" | Profiled | Method | Method has | calls | Num |LclV |AProp| CSE | Perf |bytes | %3s codesize| \n", Target::g_tgtCPUName); printf(" mdToken | CNT | RGN | Hash | EH | FRM | LOOP | NRM | IND | BBs | Cnt | Cnt | Cnt | Score | IL | HOT | CLD | method name \n"); printf("---------+------+------+----------+----+-----+------+-----+-----+-----+-----+-----+-----+---------+------+-------+-----+\n"); // 06001234 | 1234 | HOT | 0f1e2d3c | EH | ebp | LOOP | 15 | 6 | 12 | 17 | 12 | 8 | 1234.56 | 145 | 1234 | 123 | System.Example(int) // clang-format on } printf("%08X | ", currentMethodToken); if (fgHaveProfileData()) { if (fgCalledCount < 1000) { printf("%4.0f | ", fgCalledCount); } else if (fgCalledCount < 1000000) { printf("%3.0fK | ", fgCalledCount / 1000); } else { printf("%3.0fM | ", fgCalledCount / 1000000); } } else { printf(" | "); } CorInfoRegionKind regionKind = info.compMethodInfo->regionKind; if (opts.altJit) { printf("ALT | "); } else if (regionKind == CORINFO_REGION_NONE) { printf(" | "); } else if (regionKind == CORINFO_REGION_HOT) { printf(" HOT | "); } else if (regionKind == CORINFO_REGION_COLD) { printf("COLD | "); } else if (regionKind == CORINFO_REGION_JIT) { printf(" JIT | "); } else { printf("UNKN | "); } printf("%08x | ", info.compMethodHash()); if (compHndBBtabCount > 0) { printf("EH | "); } else { printf(" | "); } if (rpFrameType == FT_EBP_FRAME) { printf("%3s | ", STR_FPBASE); } else if (rpFrameType == FT_ESP_FRAME) { printf("%3s | ", STR_SPBASE); } #if DOUBLE_ALIGN else if (rpFrameType == FT_DOUBLE_ALIGN_FRAME) { printf("dbl | "); } #endif else // (rpFrameType == FT_NOT_SET) { printf("??? 
| "); } if (fgHasLoops) { printf("LOOP |"); } else { printf(" |"); } printf(" %3d |", optCallCount); printf(" %3d |", optIndirectCallCount); printf(" %3d |", fgBBcountAtCodegen); printf(" %3d |", lvaCount); if (opts.MinOpts()) { printf(" MinOpts |"); } else { printf(" %3d |", optAssertionCount); printf(" %3d |", optCSEcount); } if (info.compPerfScore < 9999.995) { printf(" %7.2f |", info.compPerfScore); } else { printf(" %7.0f |", info.compPerfScore); } printf(" %4d |", info.compMethodInfo->ILCodeSize); printf(" %5d |", info.compTotalHotCodeSize); printf(" %3d |", info.compTotalColdCodeSize); printf(" %s\n", eeGetMethodFullName(info.compMethodHnd)); printf(""); // in our logic this causes a flush } if (verbose) { printf("****** DONE compiling %s\n", info.compFullName); printf(""); // in our logic this causes a flush } #if TRACK_ENREG_STATS for (unsigned i = 0; i < lvaCount; ++i) { const LclVarDsc* varDsc = lvaGetDesc(i); if (varDsc->lvRefCnt() != 0) { s_enregisterStats.RecordLocal(varDsc); } } #endif // TRACK_ENREG_STATS // Only call _DbgBreakCheck when we are jitting, not when we are ngen-ing // For ngen the int3 or breakpoint instruction will be right at the // start of the ngen method and we will stop when we execute it. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (compJitHaltMethod()) { #if !defined(HOST_UNIX) // TODO-UNIX: re-enable this when we have an OS that supports a pop-up dialog // Don't do an assert, but just put up the dialog box so we get just-in-time debugger // launching. When you hit 'retry' it will continue and naturally stop at the INT 3 // that the JIT put in the code _DbgBreakCheck(__FILE__, __LINE__, "JitHalt"); #endif } } #endif // DEBUG } #ifdef PSEUDORANDOM_NOP_INSERTION // this is zlib adler32 checksum. source came from windows base #define BASE 65521L // largest prime smaller than 65536 #define NMAX 5552 // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 #define DO1(buf, i) \ { \ s1 += buf[i]; \ s2 += s1; \ } #define DO2(buf, i) \ DO1(buf, i); \ DO1(buf, i + 1); #define DO4(buf, i) \ DO2(buf, i); \ DO2(buf, i + 2); #define DO8(buf, i) \ DO4(buf, i); \ DO4(buf, i + 4); #define DO16(buf) \ DO8(buf, 0); \ DO8(buf, 8); unsigned adler32(unsigned adler, char* buf, unsigned int len) { unsigned int s1 = adler & 0xffff; unsigned int s2 = (adler >> 16) & 0xffff; int k; if (buf == NULL) return 1L; while (len > 0) { k = len < NMAX ? 
len : NMAX; len -= k; while (k >= 16) { DO16(buf); buf += 16; k -= 16; } if (k != 0) do { s1 += *buf++; s2 += s1; } while (--k); s1 %= BASE; s2 %= BASE; } return (s2 << 16) | s1; } #endif unsigned getMethodBodyChecksum(_In_z_ char* code, int size) { #ifdef PSEUDORANDOM_NOP_INSERTION return adler32(0, code, size); #else return 0; #endif } int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags) { CORINFO_METHOD_HANDLE methodHnd = info.compMethodHnd; info.compCode = methodInfo->ILCode; info.compILCodeSize = methodInfo->ILCodeSize; info.compILImportSize = 0; if (info.compILCodeSize == 0) { BADCODE("code size is zero"); } if (compIsForInlining()) { #ifdef DEBUG unsigned methAttr_Old = impInlineInfo->inlineCandidateInfo->methAttr; unsigned methAttr_New = info.compCompHnd->getMethodAttribs(info.compMethodHnd); unsigned flagsToIgnore = CORINFO_FLG_DONT_INLINE | CORINFO_FLG_FORCEINLINE; assert((methAttr_Old & (~flagsToIgnore)) == (methAttr_New & (~flagsToIgnore))); #endif info.compFlags = impInlineInfo->inlineCandidateInfo->methAttr; compInlineContext = impInlineInfo->inlineContext; } else { info.compFlags = info.compCompHnd->getMethodAttribs(info.compMethodHnd); #ifdef PSEUDORANDOM_NOP_INSERTION info.compChecksum = getMethodBodyChecksum((char*)methodInfo->ILCode, methodInfo->ILCodeSize); #endif compInlineContext = m_inlineStrategy->GetRootContext(); } compSwitchedToOptimized = false; compSwitchedToMinOpts = false; // compInitOptions will set the correct verbose flag. compInitOptions(compileFlags); if (!compIsForInlining() && !opts.altJit && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT)) { // We're an altjit, but the COMPlus_AltJit configuration did not say to compile this method, // so skip it. return CORJIT_SKIPPED; } #ifdef DEBUG if (verbose) { printf("IL to import:\n"); dumpILRange(info.compCode, info.compILCodeSize); } #endif // Check for COMPlus_AggressiveInlining if (JitConfig.JitAggressiveInlining()) { compDoAggressiveInlining = true; } if (compDoAggressiveInlining) { info.compFlags |= CORINFO_FLG_FORCEINLINE; } #ifdef DEBUG // Check for ForceInline stress. 
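// Under STRESS_FORCE_INLINE the method is treated as if it had been marked AggressiveInlining,
// i.e. CORINFO_FLG_FORCEINLINE is set just as it is for the config-driven case above.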
if (compStressCompile(STRESS_FORCE_INLINE, 0)) { info.compFlags |= CORINFO_FLG_FORCEINLINE; } if (compIsForInlining()) { JITLOG((LL_INFO100000, "\nINLINER impTokenLookupContextHandle for %s is 0x%p.\n", eeGetMethodFullName(info.compMethodHnd), dspPtr(impTokenLookupContextHandle))); } #endif // DEBUG impCanReimport = compStressCompile(STRESS_CHK_REIMPORT, 15); /* Initialize set a bunch of global values */ info.compScopeHnd = classPtr; info.compXcptnsCount = methodInfo->EHcount; info.compMaxStack = methodInfo->maxStack; compHndBBtab = nullptr; compHndBBtabCount = 0; compHndBBtabAllocCount = 0; info.compNativeCodeSize = 0; info.compTotalHotCodeSize = 0; info.compTotalColdCodeSize = 0; info.compClassProbeCount = 0; compHasBackwardJump = false; compHasBackwardJumpInHandler = false; #ifdef DEBUG compCurBB = nullptr; lvaTable = nullptr; // Reset node and block ID counter compGenTreeID = 0; compStatementID = 0; compBasicBlockID = 0; #endif /* Initialize emitter */ if (!compIsForInlining()) { codeGen->GetEmitter()->emitBegCG(this, compHnd); } info.compIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; info.compPublishStubParam = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PUBLISH_SECRET_PARAM); info.compHasNextCallRetAddr = false; if (opts.IsReversePInvoke()) { bool unused; info.compCallConv = info.compCompHnd->getUnmanagedCallConv(methodInfo->ftn, nullptr, &unused); info.compArgOrder = Target::g_tgtUnmanagedArgOrder; } else { info.compCallConv = CorInfoCallConvExtension::Managed; info.compArgOrder = Target::g_tgtArgOrder; } info.compIsVarArgs = false; switch (methodInfo->args.getCallConv()) { case CORINFO_CALLCONV_NATIVEVARARG: case CORINFO_CALLCONV_VARARG: info.compIsVarArgs = true; break; default: break; } info.compRetNativeType = info.compRetType = JITtype2varType(methodInfo->args.retType); info.compUnmanagedCallCountWithGCTransition = 0; info.compLvFrameListRoot = BAD_VAR_NUM; info.compInitMem = ((methodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0); /* Allocate the local variable table */ lvaInitTypeRef(); compInitDebuggingInfo(); // If are an altjit and have patchpoint info, we might need to tweak the frame size // so it's plausible for the altjit architecture. // if (!info.compMatchedVM && compileFlags->IsSet(JitFlags::JIT_FLAG_OSR)) { assert(info.compLocalsCount == info.compPatchpointInfo->NumberOfLocals()); const int totalFrameSize = info.compPatchpointInfo->TotalFrameSize(); int frameSizeUpdate = 0; #if defined(TARGET_AMD64) if ((totalFrameSize % 16) != 8) { frameSizeUpdate = 8; } #elif defined(TARGET_ARM64) if ((totalFrameSize % 16) != 0) { frameSizeUpdate = 8; } #endif if (frameSizeUpdate != 0) { JITDUMP("Mismatched altjit + OSR -- updating tier0 frame size from %d to %d\n", totalFrameSize, totalFrameSize + frameSizeUpdate); // Allocate a local copy with altered frame size. // const unsigned patchpointInfoSize = PatchpointInfo::ComputeSize(info.compLocalsCount); PatchpointInfo* const newInfo = (PatchpointInfo*)getAllocator(CMK_Unknown).allocate<char>(patchpointInfoSize); newInfo->Initialize(info.compLocalsCount, totalFrameSize + frameSizeUpdate); newInfo->Copy(info.compPatchpointInfo); // Swap it in place. // info.compPatchpointInfo = newInfo; } } #ifdef DEBUG if (compIsForInlining()) { compBasicBlockID = impInlineInfo->InlinerCompiler->compBasicBlockID; } #endif const bool forceInline = !!(info.compFlags & CORINFO_FLG_FORCEINLINE); if (!compIsForInlining() && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { // We're prejitting the root method. 
We also will analyze it as // a potential inline candidate. InlineResult prejitResult(this, methodHnd, "prejit"); // Profile data allows us to avoid early "too many IL bytes" outs. prejitResult.NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, fgHaveSufficientProfileData()); // Do the initial inline screen. impCanInlineIL(methodHnd, methodInfo, forceInline, &prejitResult); // Temporarily install the prejitResult as the // compInlineResult so it's available to fgFindJumpTargets // and can accumulate more observations as the IL is // scanned. // // We don't pass prejitResult in as a parameter to avoid // potential aliasing confusion -- the other call to // fgFindBasicBlocks may have set up compInlineResult and // the code in fgFindJumpTargets references that data // member extensively. assert(compInlineResult == nullptr); assert(impInlineInfo == nullptr); compInlineResult = &prejitResult; // Find the basic blocks. We must do this regardless of // inlineability, since we are prejitting this method. // // This will also update the status of this method as // an inline candidate. fgFindBasicBlocks(); // Undo the temporary setup. assert(compInlineResult == &prejitResult); compInlineResult = nullptr; // If still a viable, discretionary inline, assess // profitability. if (prejitResult.IsDiscretionaryCandidate()) { prejitResult.DetermineProfitability(methodInfo); } m_inlineStrategy->NotePrejitDecision(prejitResult); // Handle the results of the inline analysis. if (prejitResult.IsFailure()) { // This method is a bad inlinee according to our // analysis. We will let the InlineResult destructor // mark it as noinline in the prejit image to save the // jit some work. // // This decision better not be context-dependent. assert(prejitResult.IsNever()); } else { // This looks like a viable inline candidate. Since // we're not actually inlining, don't report anything. prejitResult.SetReported(); } } else { // We are jitting the root method, or inlining. fgFindBasicBlocks(); // If we are doing OSR, update flow to initially reach the appropriate IL offset. // if (opts.IsOSR()) { fgFixEntryFlowForOSR(); } } // If we're inlining and the candidate is bad, bail out. if (compDonotInline()) { goto _Next; } // We may decide to optimize this method, // to avoid spending a long time stuck in Tier0 code. // if (fgCanSwitchToOptimized()) { // We only expect to be able to do this at Tier0. // assert(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)); // Normal tiering should bail us out of Tier0 tail call induced loops. // So keep these methods in Tier0 if we're gathering PGO data. // If we're not gathering PGO, then switch these to optimized to // minimize the number of tail call helper stubs we might need. // Reconsider this if/when we're able to share those stubs. // // Honor the config setting that tells the jit to // always optimize methods with loops. // // If neither of those apply, and OSR is enabled, the jit may still // decide to optimize, if there's something in the method that // OSR currently cannot handle, or we're optionally suppressing // OSR by method hash. 
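// Roughly: a method containing a backward jump is switched to optimized code either when the VM
// has asked us to avoid Tier0 for loops, or when OSR is enabled (TC_OnStackReplacement) but
// compCanHavePatchpoints() reports that no patchpoint can be placed (or, in DEBUG, the method is
// excluded via JitEnableOsrRange). Otherwise it stays at Tier0 and relies on OSR patchpoints to
// escape any long-running loops.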
// const char* reason = nullptr; if (compTailPrefixSeen && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { reason = "tail.call and not BBINSTR"; } else if (compHasBackwardJump && ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0)) { reason = "loop"; } if (compHasBackwardJump && (reason == nullptr) && (JitConfig.TC_OnStackReplacement() > 0)) { const char* noPatchpointReason = nullptr; bool canEscapeViaOSR = compCanHavePatchpoints(&reason); #ifdef DEBUG if (canEscapeViaOSR) { // Optionally disable OSR by method hash. This will force any // method that might otherwise get trapped in Tier0 to be optimized. // static ConfigMethodRange JitEnableOsrRange; JitEnableOsrRange.EnsureInit(JitConfig.JitEnableOsrRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); if (!JitEnableOsrRange.Contains(hash)) { canEscapeViaOSR = false; reason = "OSR disabled by JitEnableOsrRange"; } } #endif if (canEscapeViaOSR) { JITDUMP("\nOSR enabled for this method\n"); } else { JITDUMP("\nOSR disabled for this method: %s\n", noPatchpointReason); assert(reason != nullptr); } } if (reason != nullptr) { fgSwitchToOptimized(reason); } } compSetOptimizationLevel(); #if COUNT_BASIC_BLOCKS bbCntTable.record(fgBBcount); if (fgBBcount == 1) { bbOneBBSizeTable.record(methodInfo->ILCodeSize); } #endif // COUNT_BASIC_BLOCKS #ifdef DEBUG if (verbose) { printf("Basic block list for '%s'\n", info.compFullName); fgDispBasicBlocks(); } #endif #ifdef DEBUG /* Give the function a unique number */ if (opts.disAsm || verbose) { compMethodID = ~info.compMethodHash() & 0xffff; } else { compMethodID = InterlockedIncrement(&s_compMethodsCount); } #endif if (compIsForInlining()) { compInlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS, fgBBcount); if (compInlineResult->IsFailure()) { goto _Next; } } #ifdef DEBUG if ((JitConfig.DumpJittedMethods() == 1) && !compIsForInlining()) { enum { BUFSIZE = 20 }; char osrBuffer[BUFSIZE] = {0}; if (opts.IsOSR()) { // Tiering name already includes "OSR", we just want the IL offset // sprintf_s(osrBuffer, BUFSIZE, " @0x%x", info.compILEntry); } printf("Compiling %4d %s::%s, IL size = %u, hash=0x%08x %s%s%s\n", Compiler::jitTotalMethodCompiled, info.compClassName, info.compMethodName, info.compILCodeSize, info.compMethodHash(), compGetTieringName(), osrBuffer, compGetStressMessage()); } if (compIsForInlining()) { compGenTreeID = impInlineInfo->InlinerCompiler->compGenTreeID; compStatementID = impInlineInfo->InlinerCompiler->compStatementID; } #endif compCompile(methodCodePtr, methodCodeSize, compileFlags); #ifdef DEBUG if (compIsForInlining()) { impInlineInfo->InlinerCompiler->compGenTreeID = compGenTreeID; impInlineInfo->InlinerCompiler->compStatementID = compStatementID; impInlineInfo->InlinerCompiler->compBasicBlockID = compBasicBlockID; } #endif _Next: if (compDonotInline()) { // Verify we have only one inline result in play. assert(impInlineInfo->inlineResult == compInlineResult); } if (!compIsForInlining()) { compCompileFinish(); // Did we just compile for a target architecture that the VM isn't expecting? If so, the VM // can't used the generated code (and we better be an AltJit!). if (!info.compMatchedVM) { return CORJIT_SKIPPED; } #ifdef DEBUG if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALT_JIT) && JitConfig.RunAltJitCode() == 0) { return CORJIT_SKIPPED; } #endif // DEBUG } /* Success! 
*/ return CORJIT_OK; } //------------------------------------------------------------------------ // compFindLocalVarLinear: Linear search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // offs The offset value which should occur within the life of the variable. // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end or nullptr when there is no match found. // // Description: // Linear search for matching variables with their life begin and end containing // the offset. // or NULL if one couldn't be found. // // Note: // Usually called for scope count = 4. Could be called for values upto 8. // VarScopeDsc* Compiler::compFindLocalVarLinear(unsigned varNum, unsigned offs) { for (unsigned i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* dsc = &info.compVarScopes[i]; if ((dsc->vsdVarNum == varNum) && (dsc->vsdLifeBeg <= offs) && (dsc->vsdLifeEnd > offs)) { return dsc; } } return nullptr; } //------------------------------------------------------------------------ // compFindLocalVar: Search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // offs The offset value which should occur within the life of the variable. // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end. // or NULL if one couldn't be found. // // Description: // Linear search for matching variables with their life begin and end containing // the offset only when the scope count is < MAX_LINEAR_FIND_LCL_SCOPELIST, // else use the hashtable lookup. // VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned offs) { if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST) { return compFindLocalVarLinear(varNum, offs); } else { VarScopeDsc* ret = compFindLocalVar(varNum, offs, offs); assert(ret == compFindLocalVarLinear(varNum, offs)); return ret; } } //------------------------------------------------------------------------ // compFindLocalVar: Search for variable's scope containing offset. // // Arguments: // varNum The variable number to search for in the array of scopes. // lifeBeg The life begin of the variable's scope // lifeEnd The life end of the variable's scope // // Return Value: // VarScopeDsc* of a matching variable that contains the offset within its life // begin and life end, or NULL if one couldn't be found. // // Description: // Following are the steps used: // 1. Index into the hashtable using varNum. // 2. Iterate through the linked list at index varNum to find a matching // var scope. // VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd) { assert(compVarScopeMap != nullptr); VarScopeMapInfo* info; if (compVarScopeMap->Lookup(varNum, &info)) { VarScopeListNode* list = info->head; while (list != nullptr) { if ((list->data->vsdLifeBeg <= lifeBeg) && (list->data->vsdLifeEnd > lifeEnd)) { return list->data; } list = list->next; } } return nullptr; } //------------------------------------------------------------------------- // compInitVarScopeMap: Create a scope map so it can be looked up by varNum // // Description: // Map.K => Map.V :: varNum => List(ScopeDsc) // // Create a scope map that can be indexed by varNum and can be iterated // on it's values to look for matching scope when given an offs or // lifeBeg and lifeEnd. // // Notes: // 1. 
Build the map only when we think linear search is slow, i.e., // MAX_LINEAR_FIND_LCL_SCOPELIST is large. // 2. Linked list preserves original array order. // void Compiler::compInitVarScopeMap() { if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST) { return; } assert(compVarScopeMap == nullptr); compVarScopeMap = new (getAllocator()) VarNumToScopeDscMap(getAllocator()); // 599 prime to limit huge allocations; for ex: duplicated scopes on single var. compVarScopeMap->Reallocate(min(info.compVarScopesCount, 599)); for (unsigned i = 0; i < info.compVarScopesCount; ++i) { unsigned varNum = info.compVarScopes[i].vsdVarNum; VarScopeListNode* node = VarScopeListNode::Create(&info.compVarScopes[i], getAllocator()); // Index by varNum and if the list exists append "node" to the "list". VarScopeMapInfo* info; if (compVarScopeMap->Lookup(varNum, &info)) { info->tail->next = node; info->tail = node; } // Create a new list. else { info = VarScopeMapInfo::Create(node, getAllocator()); compVarScopeMap->Set(varNum, info); } } } struct genCmpLocalVarLifeBeg { bool operator()(const VarScopeDsc* elem1, const VarScopeDsc* elem2) { return elem1->vsdLifeBeg < elem2->vsdLifeBeg; } }; struct genCmpLocalVarLifeEnd { bool operator()(const VarScopeDsc* elem1, const VarScopeDsc* elem2) { return elem1->vsdLifeEnd < elem2->vsdLifeEnd; } }; inline void Compiler::compInitScopeLists() { if (info.compVarScopesCount == 0) { compEnterScopeList = compExitScopeList = nullptr; return; } // Populate the 'compEnterScopeList' and 'compExitScopeList' lists compEnterScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; compExitScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; for (unsigned i = 0; i < info.compVarScopesCount; i++) { compEnterScopeList[i] = compExitScopeList[i] = &info.compVarScopes[i]; } jitstd::sort(compEnterScopeList, compEnterScopeList + info.compVarScopesCount, genCmpLocalVarLifeBeg()); jitstd::sort(compExitScopeList, compExitScopeList + info.compVarScopesCount, genCmpLocalVarLifeEnd()); } void Compiler::compResetScopeLists() { if (info.compVarScopesCount == 0) { return; } assert(compEnterScopeList && compExitScopeList); compNextEnterScope = compNextExitScope = 0; } VarScopeDsc* Compiler::compGetNextEnterScope(unsigned offs, bool scan) { assert(info.compVarScopesCount); assert(compEnterScopeList && compExitScopeList); if (compNextEnterScope < info.compVarScopesCount) { assert(compEnterScopeList[compNextEnterScope]); unsigned nextEnterOff = compEnterScopeList[compNextEnterScope]->vsdLifeBeg; assert(scan || (offs <= nextEnterOff)); if (!scan) { if (offs == nextEnterOff) { return compEnterScopeList[compNextEnterScope++]; } } else { if (nextEnterOff <= offs) { return compEnterScopeList[compNextEnterScope++]; } } } return nullptr; } VarScopeDsc* Compiler::compGetNextExitScope(unsigned offs, bool scan) { assert(info.compVarScopesCount); assert(compEnterScopeList && compExitScopeList); if (compNextExitScope < info.compVarScopesCount) { assert(compExitScopeList[compNextExitScope]); unsigned nextExitOffs = compExitScopeList[compNextExitScope]->vsdLifeEnd; assert(scan || (offs <= nextExitOffs)); if (!scan) { if (offs == nextExitOffs) { return compExitScopeList[compNextExitScope++]; } } else { if (nextExitOffs <= offs) { return compExitScopeList[compNextExitScope++]; } } } return nullptr; } // The function will call the callback functions for scopes with boundaries // at instrs from the current status of the scope lists to 'offset', // ordered by instrs. 
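//
// Arguments:
//    offset       - IL offset up to which scope boundaries are processed
//    inScope      - set of tracked variables currently in scope, passed to the callbacks
//    enterScopeFn - member callback invoked for each scope that is entered
//    exitScopeFn  - member callback invoked for each scope that is exited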
void Compiler::compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)) { assert(offset != BAD_IL_OFFSET); assert(inScope != nullptr); bool foundExit = false, foundEnter = true; VarScopeDsc* scope; VarScopeDsc* nextExitScope = nullptr; VarScopeDsc* nextEnterScope = nullptr; unsigned offs = offset, curEnterOffs = 0; goto START_FINDING_SCOPES; // We need to determine the scopes which are open for the current block. // This loop walks over the missing blocks between the current and the // previous block, keeping the enter and exit offsets in lockstep. do { foundExit = foundEnter = false; if (nextExitScope) { (this->*exitScopeFn)(inScope, nextExitScope); nextExitScope = nullptr; foundExit = true; } offs = nextEnterScope ? nextEnterScope->vsdLifeBeg : offset; while ((scope = compGetNextExitScope(offs, true)) != nullptr) { foundExit = true; if (!nextEnterScope || scope->vsdLifeEnd > nextEnterScope->vsdLifeBeg) { // We overshot the last found Enter scope. Save the scope for later // and find an entering scope nextExitScope = scope; break; } (this->*exitScopeFn)(inScope, scope); } if (nextEnterScope) { (this->*enterScopeFn)(inScope, nextEnterScope); curEnterOffs = nextEnterScope->vsdLifeBeg; nextEnterScope = nullptr; foundEnter = true; } offs = nextExitScope ? nextExitScope->vsdLifeEnd : offset; START_FINDING_SCOPES: while ((scope = compGetNextEnterScope(offs, true)) != nullptr) { foundEnter = true; if ((nextExitScope && scope->vsdLifeBeg >= nextExitScope->vsdLifeEnd) || (scope->vsdLifeBeg > curEnterOffs)) { // We overshot the last found exit scope. Save the scope for later // and find an exiting scope nextEnterScope = scope; break; } (this->*enterScopeFn)(inScope, scope); if (!nextExitScope) { curEnterOffs = scope->vsdLifeBeg; } } } while (foundExit || foundEnter); } #if defined(DEBUG) void Compiler::compDispScopeLists() { unsigned i; printf("Local variable scopes = %d\n", info.compVarScopesCount); if (info.compVarScopesCount) { printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n"); } printf("Sorted by enter scope:\n"); for (i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = compEnterScopeList[i]; assert(varScope); printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); if (compNextEnterScope == i) { printf(" <-- next enter scope"); } printf("\n"); } printf("Sorted by exit scope:\n"); for (i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = compExitScopeList[i]; assert(varScope); printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); if (compNextExitScope == i) { printf(" <-- next exit scope"); } printf("\n"); } } void Compiler::compDispLocalVars() { printf("info.compVarScopesCount = %d\n", info.compVarScopesCount); if (info.compVarScopesCount > 0) { printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n"); } for (unsigned i = 0; i < info.compVarScopesCount; i++) { VarScopeDsc* varScope = &info.compVarScopes[i]; printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh\n", i, varScope->vsdVarNum, varScope->vsdLVnum, VarNameToStr(varScope->vsdName) == nullptr ? 
"UNKNOWN" : VarNameToStr(varScope->vsdName), varScope->vsdLifeBeg, varScope->vsdLifeEnd); } } #endif // DEBUG /*****************************************************************************/ #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo : public ICorJitInfo { //------------------------------------------------------------------------ // WrapICorJitInfo::makeOne: allocate an instance of WrapICorJitInfo // // Arguments: // alloc - the allocator to get memory from for the instance // compile - the compiler instance // compHndRef - the ICorJitInfo handle from the EE; the caller's // copy may be replaced with a "wrapper" instance // // Return Value: // If the config flags indicate that ICorJitInfo should be wrapped, // we return the "wrapper" instance; otherwise we return "nullptr". static WrapICorJitInfo* makeOne(ArenaAllocator* alloc, Compiler* compiler, COMP_HANDLE& compHndRef /* INOUT */) { WrapICorJitInfo* wrap = nullptr; if (JitConfig.JitEECallTimingInfo() != 0) { // It's too early to use the default allocator, so we do this // in two steps to be safe (the constructor doesn't need to do // anything except fill in the vtable pointer, so we let the // compiler do it). void* inst = alloc->allocateMemory(roundUp(sizeof(WrapICorJitInfo))); if (inst != nullptr) { // If you get a build error here due to 'WrapICorJitInfo' being // an abstract class, it's very likely that the wrapper bodies // in ICorJitInfo_API_wrapper.hpp are no longer in sync with // the EE interface; please be kind and update the header file. wrap = new (inst, jitstd::placement_t()) WrapICorJitInfo(); wrap->wrapComp = compiler; // Save the real handle and replace it with our wrapped version. wrap->wrapHnd = compHndRef; compHndRef = wrap; } } return wrap; } private: Compiler* wrapComp; COMP_HANDLE wrapHnd; // the "real thing" public: #include "ICorJitInfo_API_wrapper.hpp" }; #endif // MEASURE_CLRAPI_CALLS /*****************************************************************************/ // Compile a single method int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd, CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags, void* inlineInfoPtr) { // // A non-NULL inlineInfo means we are compiling the inlinee method. // InlineInfo* inlineInfo = (InlineInfo*)inlineInfoPtr; bool jitFallbackCompile = false; START: int result = CORJIT_INTERNALERROR; ArenaAllocator* pAlloc = nullptr; ArenaAllocator alloc; #if MEASURE_CLRAPI_CALLS WrapICorJitInfo* wrapCLR = nullptr; #endif if (inlineInfo) { // Use inliner's memory allocator when compiling the inlinee. 
pAlloc = inlineInfo->InlinerCompiler->compGetArenaAllocator(); } else { pAlloc = &alloc; } Compiler* pComp; pComp = nullptr; struct Param { Compiler* pComp; ArenaAllocator* pAlloc; bool jitFallbackCompile; CORINFO_METHOD_HANDLE methodHnd; CORINFO_MODULE_HANDLE classPtr; COMP_HANDLE compHnd; CORINFO_METHOD_INFO* methodInfo; void** methodCodePtr; uint32_t* methodCodeSize; JitFlags* compileFlags; InlineInfo* inlineInfo; #if MEASURE_CLRAPI_CALLS WrapICorJitInfo* wrapCLR; #endif int result; } param; param.pComp = nullptr; param.pAlloc = pAlloc; param.jitFallbackCompile = jitFallbackCompile; param.methodHnd = methodHnd; param.classPtr = classPtr; param.compHnd = compHnd; param.methodInfo = methodInfo; param.methodCodePtr = methodCodePtr; param.methodCodeSize = methodCodeSize; param.compileFlags = compileFlags; param.inlineInfo = inlineInfo; #if MEASURE_CLRAPI_CALLS param.wrapCLR = nullptr; #endif param.result = result; setErrorTrap(compHnd, Param*, pParamOuter, &param) { setErrorTrap(nullptr, Param*, pParam, pParamOuter) { if (pParam->inlineInfo) { // Lazily create the inlinee compiler object if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == nullptr) { pParam->inlineInfo->InlinerCompiler->InlineeCompiler = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); } // Use the inlinee compiler object pParam->pComp = pParam->inlineInfo->InlinerCompiler->InlineeCompiler; #ifdef DEBUG // memset(pParam->pComp, 0xEE, sizeof(Compiler)); #endif } else { // Allocate create the inliner compiler object pParam->pComp = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); } #if MEASURE_CLRAPI_CALLS pParam->wrapCLR = WrapICorJitInfo::makeOne(pParam->pAlloc, pParam->pComp, pParam->compHnd); #endif // push this compiler on the stack (TLS) pParam->pComp->prevCompiler = JitTls::GetCompiler(); JitTls::SetCompiler(pParam->pComp); // PREFIX_ASSUME gets turned into ASSERT_CHECK and we cannot have it here #if defined(_PREFAST_) || defined(_PREFIX_) PREFIX_ASSUME(pParam->pComp != NULL); #else assert(pParam->pComp != nullptr); #endif pParam->pComp->compInit(pParam->pAlloc, pParam->methodHnd, pParam->compHnd, pParam->methodInfo, pParam->inlineInfo); #ifdef DEBUG pParam->pComp->jitFallbackCompile = pParam->jitFallbackCompile; #endif // Now generate the code pParam->result = pParam->pComp->compCompile(pParam->classPtr, pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); } finallyErrorTrap() { Compiler* pCompiler = pParamOuter->pComp; // If OOM is thrown when allocating memory for a pComp, we will end up here. // For this case, pComp and also pCompiler will be a nullptr // if (pCompiler != nullptr) { pCompiler->info.compCode = nullptr; // pop the compiler off the TLS stack only if it was linked above assert(JitTls::GetCompiler() == pCompiler); JitTls::SetCompiler(pCompiler->prevCompiler); } if (pParamOuter->inlineInfo == nullptr) { // Free up the allocator we were using pParamOuter->pAlloc->destroy(); } } endErrorTrap() } impJitErrorTrap() { // If we were looking at an inlinee.... if (inlineInfo != nullptr) { // Note that we failed to compile the inlinee, and that // there's no point trying to inline it again anywhere else. 
inlineInfo->inlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); } param.result = __errc; } endErrorTrap() result = param.result; if (!inlineInfo && (result == CORJIT_INTERNALERROR || result == CORJIT_RECOVERABLEERROR || result == CORJIT_IMPLLIMITATION) && !jitFallbackCompile) { // If we failed the JIT, reattempt with debuggable code. jitFallbackCompile = true; // Update the flags for 'safer' code generation. compileFlags->Set(JitFlags::JIT_FLAG_MIN_OPT); compileFlags->Clear(JitFlags::JIT_FLAG_SIZE_OPT); compileFlags->Clear(JitFlags::JIT_FLAG_SPEED_OPT); goto START; } return result; } #if defined(UNIX_AMD64_ABI) // GetTypeFromClassificationAndSizes: // Returns the type of the eightbyte accounting for the classification and size of the eightbyte. // // args: // classType: classification type // size: size of the eightbyte. // // static var_types Compiler::GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size) { var_types type = TYP_UNKNOWN; switch (classType) { case SystemVClassificationTypeInteger: if (size == 1) { type = TYP_BYTE; } else if (size <= 2) { type = TYP_SHORT; } else if (size <= 4) { type = TYP_INT; } else if (size <= 8) { type = TYP_LONG; } else { assert(false && "GetTypeFromClassificationAndSizes Invalid Integer classification type."); } break; case SystemVClassificationTypeIntegerReference: type = TYP_REF; break; case SystemVClassificationTypeIntegerByRef: type = TYP_BYREF; break; case SystemVClassificationTypeSSE: if (size <= 4) { type = TYP_FLOAT; } else if (size <= 8) { type = TYP_DOUBLE; } else { assert(false && "GetTypeFromClassificationAndSizes Invalid SSE classification type."); } break; default: assert(false && "GetTypeFromClassificationAndSizes Invalid classification type."); break; } return type; } //------------------------------------------------------------------- // GetEightByteType: Returns the type of eightbyte slot of a struct // // Arguments: // structDesc - struct classification description. // slotNum - eightbyte slot number for the struct. // // Return Value: // type of the eightbyte slot of the struct // // static var_types Compiler::GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum) { var_types eightByteType = TYP_UNDEF; unsigned len = structDesc.eightByteSizes[slotNum]; switch (structDesc.eightByteClassifications[slotNum]) { case SystemVClassificationTypeInteger: // See typelist.h for jit type definition. // All the types of size < 4 bytes are of jit type TYP_INT. 
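            // For example, a struct of two 4-byte ints forms a single 8-byte INTEGER
            // eightbyte and therefore maps to TYP_LONG below.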
if (structDesc.eightByteSizes[slotNum] <= 4) { eightByteType = TYP_INT; } else if (structDesc.eightByteSizes[slotNum] <= 8) { eightByteType = TYP_LONG; } else { assert(false && "GetEightByteType Invalid Integer classification type."); } break; case SystemVClassificationTypeIntegerReference: assert(len == REGSIZE_BYTES); eightByteType = TYP_REF; break; case SystemVClassificationTypeIntegerByRef: assert(len == REGSIZE_BYTES); eightByteType = TYP_BYREF; break; case SystemVClassificationTypeSSE: if (structDesc.eightByteSizes[slotNum] <= 4) { eightByteType = TYP_FLOAT; } else if (structDesc.eightByteSizes[slotNum] <= 8) { eightByteType = TYP_DOUBLE; } else { assert(false && "GetEightByteType Invalid SSE classification type."); } break; default: assert(false && "GetEightByteType Invalid classification type."); break; } return eightByteType; } //------------------------------------------------------------------------------------------------------ // GetStructTypeOffset: Gets the type, size and offset of the eightbytes of a struct for System V systems. // // Arguments: // 'structDesc' - struct description // 'type0' - out param; returns the type of the first eightbyte. // 'type1' - out param; returns the type of the second eightbyte. // 'offset0' - out param; returns the offset of the first eightbyte. // 'offset1' - out param; returns the offset of the second eightbyte. // // static void Compiler::GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1) { *offset0 = structDesc.eightByteOffsets[0]; *offset1 = structDesc.eightByteOffsets[1]; *type0 = TYP_UNKNOWN; *type1 = TYP_UNKNOWN; // Set the first eightbyte data if (structDesc.eightByteCount >= 1) { *type0 = GetEightByteType(structDesc, 0); } // Set the second eight byte data if (structDesc.eightByteCount == 2) { *type1 = GetEightByteType(structDesc, 1); } } //------------------------------------------------------------------------------------------------------ // GetStructTypeOffset: Gets the type, size and offset of the eightbytes of a struct for System V systems. // // Arguments: // 'typeHnd' - type handle // 'type0' - out param; returns the type of the first eightbyte. // 'type1' - out param; returns the type of the second eightbyte. // 'offset0' - out param; returns the offset of the first eightbyte. // 'offset1' - out param; returns the offset of the second eightbyte. // void Compiler::GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1) { SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); assert(structDesc.passedInRegisters); GetStructTypeOffset(structDesc, type0, type1, offset0, offset1); } #endif // defined(UNIX_AMD64_ABI) /*****************************************************************************/ /*****************************************************************************/ #ifdef DEBUG Compiler::NodeToIntMap* Compiler::FindReachableNodesInNodeTestData() { NodeToIntMap* reachable = new (getAllocatorDebugOnly()) NodeToIntMap(getAllocatorDebugOnly()); if (m_nodeTestData == nullptr) { return reachable; } // Otherwise, iterate. 
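    // Walk every tree of every statement in every block; any node present in the
    // node test data map is recorded as reachable. For calls, the corresponding
    // late args are looked up explicitly so their annotations are kept as well.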
for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->NonPhiStatements()) { for (GenTree* const tree : stmt->TreeList()) { TestLabelAndNum tlAndN; // For call nodes, translate late args to what they stand for. if (tree->OperGet() == GT_CALL) { GenTreeCall* call = tree->AsCall(); unsigned i = 0; for (GenTreeCall::Use& use : call->Args()) { if ((use.GetNode()->gtFlags & GTF_LATE_ARG) != 0) { // Find the corresponding late arg. GenTree* lateArg = call->fgArgInfo->GetArgNode(i); if (GetNodeTestData()->Lookup(lateArg, &tlAndN)) { reachable->Set(lateArg, 0); } } i++; } } if (GetNodeTestData()->Lookup(tree, &tlAndN)) { reachable->Set(tree, 0); } } } } return reachable; } void Compiler::TransferTestDataToNode(GenTree* from, GenTree* to) { TestLabelAndNum tlAndN; // We can't currently associate multiple annotations with a single node. // If we need to, we can fix this... // If the table is null, don't create it just to do the lookup, which would fail... if (m_nodeTestData != nullptr && GetNodeTestData()->Lookup(from, &tlAndN)) { assert(!GetNodeTestData()->Lookup(to, &tlAndN)); // We can't currently associate multiple annotations with a single node. // If we need to, we can fix this... TestLabelAndNum tlAndNTo; assert(!GetNodeTestData()->Lookup(to, &tlAndNTo)); GetNodeTestData()->Remove(from); GetNodeTestData()->Set(to, tlAndN); } } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX jvc XX XX XX XX Functions for the stand-alone version of the JIT . XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ void codeGeneratorCodeSizeBeg() { } /***************************************************************************** * * Used for counting pointer assignments. 
*/ /*****************************************************************************/ void codeGeneratorCodeSizeEnd() { } /***************************************************************************** * * Gather statistics - mainly used for the standalone * Enable various #ifdef's to get the information you need */ void Compiler::compJitStats() { #if CALL_ARG_STATS /* Method types and argument statistics */ compCallArgStats(); #endif // CALL_ARG_STATS } #if CALL_ARG_STATS /***************************************************************************** * * Gather statistics about method calls and arguments */ void Compiler::compCallArgStats() { unsigned argNum; unsigned argDWordNum; unsigned argLngNum; unsigned argFltNum; unsigned argDblNum; unsigned regArgNum; unsigned regArgDeferred; unsigned regArgTemp; unsigned regArgLclVar; unsigned regArgConst; unsigned argTempsThisMethod = 0; assert(fgStmtListThreaded); for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { for (GenTree* const call : stmt->TreeList()) { if (call->gtOper != GT_CALL) continue; argNum = regArgNum = regArgDeferred = regArgTemp = regArgConst = regArgLclVar = argDWordNum = argLngNum = argFltNum = argDblNum = 0; argTotalCalls++; if (call->AsCall()->gtCallThisArg == nullptr) { if (call->AsCall()->gtCallType == CT_HELPER) { argHelperCalls++; } else { argStaticCalls++; } } else { /* We have a 'this' pointer */ argDWordNum++; argNum++; regArgNum++; regArgDeferred++; argTotalObjPtr++; if (call->AsCall()->IsVirtual()) { /* virtual function */ argVirtualCalls++; } else { argNonVirtualCalls++; } } } } } argTempsCntTable.record(argTempsThisMethod); if (argMaxTempsPerMethod < argTempsThisMethod) { argMaxTempsPerMethod = argTempsThisMethod; } } /* static */ void Compiler::compDispCallArgStats(FILE* fout) { if (argTotalCalls == 0) return; fprintf(fout, "\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Call stats\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Total # of calls = %d, calls / method = %.3f\n\n", argTotalCalls, (float)argTotalCalls / genMethodCnt); fprintf(fout, "Percentage of helper calls = %4.2f %%\n", (float)(100 * argHelperCalls) / argTotalCalls); fprintf(fout, "Percentage of static calls = %4.2f %%\n", (float)(100 * argStaticCalls) / argTotalCalls); fprintf(fout, "Percentage of virtual calls = %4.2f %%\n", (float)(100 * argVirtualCalls) / argTotalCalls); fprintf(fout, "Percentage of non-virtual calls = %4.2f %%\n\n", (float)(100 * argNonVirtualCalls) / argTotalCalls); fprintf(fout, "Average # of arguments per call = %.2f%%\n\n", (float)argTotalArgs / argTotalCalls); fprintf(fout, "Percentage of DWORD arguments = %.2f %%\n", (float)(100 * argTotalDWordArgs) / argTotalArgs); fprintf(fout, "Percentage of LONG arguments = %.2f %%\n", (float)(100 * argTotalLongArgs) / argTotalArgs); fprintf(fout, "Percentage of FLOAT arguments = %.2f %%\n", (float)(100 * argTotalFloatArgs) / argTotalArgs); fprintf(fout, "Percentage of DOUBLE arguments = %.2f %%\n\n", (float)(100 * argTotalDoubleArgs) / argTotalArgs); if (argTotalRegArgs == 0) return; /* fprintf(fout, "Total deferred arguments = %d \n", argTotalDeferred); fprintf(fout, "Total temp arguments = %d \n\n", argTotalTemps); fprintf(fout, "Total 'this' arguments = %d \n", argTotalObjPtr); fprintf(fout, "Total local var arguments = %d \n", argTotalLclVar); fprintf(fout, "Total constant arguments = %d \n\n", argTotalConst); */ fprintf(fout, "\nRegister 
Arguments:\n\n"); fprintf(fout, "Percentage of deferred arguments = %.2f %%\n", (float)(100 * argTotalDeferred) / argTotalRegArgs); fprintf(fout, "Percentage of temp arguments = %.2f %%\n\n", (float)(100 * argTotalTemps) / argTotalRegArgs); fprintf(fout, "Maximum # of temps per method = %d\n\n", argMaxTempsPerMethod); fprintf(fout, "Percentage of ObjPtr arguments = %.2f %%\n", (float)(100 * argTotalObjPtr) / argTotalRegArgs); // fprintf(fout, "Percentage of global arguments = %.2f %%\n", (float)(100 * argTotalDWordGlobEf) / // argTotalRegArgs); fprintf(fout, "Percentage of constant arguments = %.2f %%\n", (float)(100 * argTotalConst) / argTotalRegArgs); fprintf(fout, "Percentage of lcl var arguments = %.2f %%\n\n", (float)(100 * argTotalLclVar) / argTotalRegArgs); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Argument count frequency table (includes ObjPtr):\n"); fprintf(fout, "--------------------------------------------------\n"); argCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "DWORD argument count frequency table (w/o LONG):\n"); fprintf(fout, "--------------------------------------------------\n"); argDWordCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "Temps count frequency table (per method):\n"); fprintf(fout, "--------------------------------------------------\n"); argTempsCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); /* fprintf(fout, "--------------------------------------------------\n"); fprintf(fout, "DWORD argument count frequency table (w/ LONG):\n"); fprintf(fout, "--------------------------------------------------\n"); argDWordLngCntTable.dump(fout); fprintf(fout, "--------------------------------------------------\n"); */ } #endif // CALL_ARG_STATS // JIT time end to end, and by phases. 
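//
// When FEATURE_JIT_METHOD_PERF is enabled, JitTimer records per-phase cycle counts
// for each compilation and CompTimeSummaryInfo aggregates them across methods; the
// results can be printed as a summary report and/or appended to a CSV file.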
#ifdef FEATURE_JIT_METHOD_PERF // Static variables CritSecObject CompTimeSummaryInfo::s_compTimeSummaryLock; CompTimeSummaryInfo CompTimeSummaryInfo::s_compTimeSummary; #if MEASURE_CLRAPI_CALLS double JitTimer::s_cyclesPerSec = CachedCyclesPerSecond(); #endif #endif // FEATURE_JIT_METHOD_PERF #if defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS || defined(FEATURE_TRACELOGGING) const char* PhaseNames[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) string_nm, #include "compphases.h" }; const char* PhaseEnums[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) #enum_nm, #include "compphases.h" }; const LPCWSTR PhaseShortNames[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) W(short_nm), #include "compphases.h" }; #endif // defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS #ifdef FEATURE_JIT_METHOD_PERF bool PhaseHasChildren[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) hasChildren, #include "compphases.h" }; int PhaseParent[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) parent, #include "compphases.h" }; bool PhaseReportsIRSize[] = { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) measureIR, #include "compphases.h" }; CompTimeInfo::CompTimeInfo(unsigned byteCodeBytes) : m_byteCodeBytes(byteCodeBytes) , m_totalCycles(0) , m_parentPhaseEndSlop(0) , m_timerFailure(false) #if MEASURE_CLRAPI_CALLS , m_allClrAPIcalls(0) , m_allClrAPIcycles(0) #endif { for (int i = 0; i < PHASE_NUMBER_OF; i++) { m_invokesByPhase[i] = 0; m_cyclesByPhase[i] = 0; #if MEASURE_CLRAPI_CALLS m_CLRinvokesByPhase[i] = 0; m_CLRcyclesByPhase[i] = 0; #endif } #if MEASURE_CLRAPI_CALLS assert(ArrLen(m_perClrAPIcalls) == API_ICorJitInfo_Names::API_COUNT); assert(ArrLen(m_perClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); assert(ArrLen(m_maxClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { m_perClrAPIcalls[i] = 0; m_perClrAPIcycles[i] = 0; m_maxClrAPIcycles[i] = 0; } #endif } bool CompTimeSummaryInfo::IncludedInFilteredData(CompTimeInfo& info) { return false; // info.m_byteCodeBytes < 10; } //------------------------------------------------------------------------ // CompTimeSummaryInfo::AddInfo: Record timing info from one compile. // // Arguments: // info - The timing information to record. // includePhases - If "true", the per-phase info in "info" is valid, // which means that a "normal" compile has ended; if // the value is "false" we are recording the results // of a partial compile (typically an import-only run // on behalf of the inliner) in which case the phase // info is not valid and so we only record EE call // overhead. void CompTimeSummaryInfo::AddInfo(CompTimeInfo& info, bool includePhases) { if (info.m_timerFailure) { return; // Don't update if there was a failure. } CritSecHolder timeLock(s_compTimeSummaryLock); if (includePhases) { bool includeInFiltered = IncludedInFilteredData(info); m_numMethods++; // Update the totals and maxima. m_total.m_byteCodeBytes += info.m_byteCodeBytes; m_maximum.m_byteCodeBytes = max(m_maximum.m_byteCodeBytes, info.m_byteCodeBytes); m_total.m_totalCycles += info.m_totalCycles; m_maximum.m_totalCycles = max(m_maximum.m_totalCycles, info.m_totalCycles); #if MEASURE_CLRAPI_CALLS // Update the CLR-API values. 
m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); #endif if (includeInFiltered) { m_numFilteredMethods++; m_filtered.m_byteCodeBytes += info.m_byteCodeBytes; m_filtered.m_totalCycles += info.m_totalCycles; m_filtered.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; } for (int i = 0; i < PHASE_NUMBER_OF; i++) { m_total.m_invokesByPhase[i] += info.m_invokesByPhase[i]; m_total.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; #if MEASURE_CLRAPI_CALLS m_total.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; m_total.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; #endif if (includeInFiltered) { m_filtered.m_invokesByPhase[i] += info.m_invokesByPhase[i]; m_filtered.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; #if MEASURE_CLRAPI_CALLS m_filtered.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; m_filtered.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; #endif } m_maximum.m_cyclesByPhase[i] = max(m_maximum.m_cyclesByPhase[i], info.m_cyclesByPhase[i]); #if MEASURE_CLRAPI_CALLS m_maximum.m_CLRcyclesByPhase[i] = max(m_maximum.m_CLRcyclesByPhase[i], info.m_CLRcyclesByPhase[i]); #endif } m_total.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; m_maximum.m_parentPhaseEndSlop = max(m_maximum.m_parentPhaseEndSlop, info.m_parentPhaseEndSlop); } #if MEASURE_CLRAPI_CALLS else { m_totMethods++; // Update the "global" CLR-API values. m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); // Update the per-phase CLR-API values. m_total.m_invokesByPhase[PHASE_CLR_API] += info.m_allClrAPIcalls; m_maximum.m_invokesByPhase[PHASE_CLR_API] = max(m_maximum.m_perClrAPIcalls[PHASE_CLR_API], info.m_allClrAPIcalls); m_total.m_cyclesByPhase[PHASE_CLR_API] += info.m_allClrAPIcycles; m_maximum.m_cyclesByPhase[PHASE_CLR_API] = max(m_maximum.m_cyclesByPhase[PHASE_CLR_API], info.m_allClrAPIcycles); } for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { m_total.m_perClrAPIcalls[i] += info.m_perClrAPIcalls[i]; m_maximum.m_perClrAPIcalls[i] = max(m_maximum.m_perClrAPIcalls[i], info.m_perClrAPIcalls[i]); m_total.m_perClrAPIcycles[i] += info.m_perClrAPIcycles[i]; m_maximum.m_perClrAPIcycles[i] = max(m_maximum.m_perClrAPIcycles[i], info.m_perClrAPIcycles[i]); m_maximum.m_maxClrAPIcycles[i] = max(m_maximum.m_maxClrAPIcycles[i], info.m_maxClrAPIcycles[i]); } #endif } // Static LPCWSTR Compiler::compJitTimeLogFilename = nullptr; void CompTimeSummaryInfo::Print(FILE* f) { if (f == nullptr) { return; } // Otherwise... 
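    // Convert cycle counts to milliseconds using the cached processor frequency;
    // if no high-frequency timer is available there is nothing useful to report.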
double countsPerSec = CachedCyclesPerSecond(); if (countsPerSec == 0.0) { fprintf(f, "Processor does not have a high-frequency timer.\n"); return; } double totTime_ms = 0.0; fprintf(f, "JIT Compilation time report:\n"); fprintf(f, " Compiled %d methods.\n", m_numMethods); if (m_numMethods != 0) { fprintf(f, " Compiled %d bytecodes total (%d max, %8.2f avg).\n", m_total.m_byteCodeBytes, m_maximum.m_byteCodeBytes, (double)m_total.m_byteCodeBytes / (double)m_numMethods); totTime_ms = ((double)m_total.m_totalCycles / countsPerSec) * 1000.0; fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_total.m_totalCycles / 1000000.0), totTime_ms); fprintf(f, " max: %10.3f Mcycles/%10.3f ms\n", ((double)m_maximum.m_totalCycles) / 1000000.0, ((double)m_maximum.m_totalCycles / countsPerSec) * 1000.0); fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n", ((double)m_total.m_totalCycles) / 1000000.0 / (double)m_numMethods, totTime_ms / (double)m_numMethods); const char* extraHdr1 = ""; const char* extraHdr2 = ""; #if MEASURE_CLRAPI_CALLS bool extraInfo = (JitConfig.JitEECallTimingInfo() != 0); if (extraInfo) { extraHdr1 = " CLRs/meth % in CLR"; extraHdr2 = "-----------------------"; } #endif fprintf(f, "\n Total time by phases:\n"); fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total max (ms)%s\n", extraHdr1); fprintf(f, " ---------------------------------------------------------------------------------------%s\n", extraHdr2); // Ensure that at least the names array and the Phases enum have the same number of entries: assert(ArrLen(PhaseNames) == PHASE_NUMBER_OF); for (int i = 0; i < PHASE_NUMBER_OF; i++) { double phase_tot_ms = (((double)m_total.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; double phase_max_ms = (((double)m_maximum.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; #if MEASURE_CLRAPI_CALLS // Skip showing CLR API call info if we didn't collect any if (i == PHASE_CLR_API && !extraInfo) continue; #endif // Indent nested phases, according to depth. 
int ancPhase = PhaseParent[i]; while (ancPhase != -1) { fprintf(f, " "); ancPhase = PhaseParent[ancPhase]; } fprintf(f, " %-30s %6.2f %10.2f %9.3f %8.2f%% %8.3f", PhaseNames[i], ((double)m_total.m_invokesByPhase[i]) / ((double)m_numMethods), ((double)m_total.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms), phase_max_ms); #if MEASURE_CLRAPI_CALLS if (extraInfo && i != PHASE_CLR_API) { double nest_tot_ms = (((double)m_total.m_CLRcyclesByPhase[i]) / countsPerSec) * 1000.0; double nest_percent = nest_tot_ms * 100.0 / totTime_ms; double calls_per_fn = ((double)m_total.m_CLRinvokesByPhase[i]) / ((double)m_numMethods); if (nest_percent > 0.1 || calls_per_fn > 10) fprintf(f, " %5.1f %8.2f%%", calls_per_fn, nest_percent); } #endif fprintf(f, "\n"); } // Show slop if it's over a certain percentage of the total double pslop_pct = 100.0 * m_total.m_parentPhaseEndSlop * 1000.0 / countsPerSec / totTime_ms; if (pslop_pct >= 1.0) { fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " "%3.1f%% of total.\n\n", m_total.m_parentPhaseEndSlop / 1000000.0, pslop_pct); } } if (m_numFilteredMethods > 0) { fprintf(f, " Compiled %d methods that meet the filter requirement.\n", m_numFilteredMethods); fprintf(f, " Compiled %d bytecodes total (%8.2f avg).\n", m_filtered.m_byteCodeBytes, (double)m_filtered.m_byteCodeBytes / (double)m_numFilteredMethods); double totTime_ms = ((double)m_filtered.m_totalCycles / countsPerSec) * 1000.0; fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_filtered.m_totalCycles / 1000000.0), totTime_ms); fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n", ((double)m_filtered.m_totalCycles) / 1000000.0 / (double)m_numFilteredMethods, totTime_ms / (double)m_numFilteredMethods); fprintf(f, " Total time by phases:\n"); fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total\n"); fprintf(f, " --------------------------------------------------------------------------------------\n"); // Ensure that at least the names array and the Phases enum have the same number of entries: assert(ArrLen(PhaseNames) == PHASE_NUMBER_OF); for (int i = 0; i < PHASE_NUMBER_OF; i++) { double phase_tot_ms = (((double)m_filtered.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; // Indent nested phases, according to depth. 
int ancPhase = PhaseParent[i]; while (ancPhase != -1) { fprintf(f, " "); ancPhase = PhaseParent[ancPhase]; } fprintf(f, " %-30s %5.2f %10.2f %9.3f %8.2f%%\n", PhaseNames[i], ((double)m_filtered.m_invokesByPhase[i]) / ((double)m_numFilteredMethods), ((double)m_filtered.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms)); } double fslop_ms = m_filtered.m_parentPhaseEndSlop * 1000.0 / countsPerSec; if (fslop_ms > 1.0) { fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " "%3.1f%% of total.\n\n", m_filtered.m_parentPhaseEndSlop / 1000000.0, fslop_ms); } } #if MEASURE_CLRAPI_CALLS if (m_total.m_allClrAPIcalls > 0 && m_total.m_allClrAPIcycles > 0) { fprintf(f, "\n"); if (m_totMethods > 0) fprintf(f, " Imported %u methods.\n\n", m_numMethods + m_totMethods); fprintf(f, " CLR API # calls total time max time avg time %% " "of total\n"); fprintf(f, " -------------------------------------------------------------------------------"); fprintf(f, "---------------------\n"); static const char* APInames[] = { #define DEF_CLR_API(name) #name, #include "ICorJitInfo_API_names.h" }; unsigned shownCalls = 0; double shownMillis = 0.0; #ifdef DEBUG unsigned checkedCalls = 0; double checkedMillis = 0.0; #endif for (unsigned pass = 0; pass < 2; pass++) { for (unsigned i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) { unsigned calls = m_total.m_perClrAPIcalls[i]; if (calls == 0) continue; unsigned __int64 cycles = m_total.m_perClrAPIcycles[i]; double millis = 1000.0 * cycles / countsPerSec; // Don't show the small fry to keep the results manageable if (millis < 0.5) { // We always show the following API because it is always called // exactly once for each method and its body is the simplest one // possible (it just returns an integer constant), and therefore // it can be used to measure the overhead of adding the CLR API // timing code. Roughly speaking, on a 3GHz x64 box the overhead // per call should be around 40 ns when using RDTSC, compared to // about 140 ns when using GetThreadCycles() under Windows. if (i != API_ICorJitInfo_Names::API_getExpectedTargetArchitecture) continue; } // In the first pass we just compute the totals. 
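                // The second pass prints one line per API: its call count, total time,
                // max and average time, and its share of the shown total.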
if (pass == 0) { shownCalls += m_total.m_perClrAPIcalls[i]; shownMillis += millis; continue; } unsigned __int32 maxcyc = m_maximum.m_maxClrAPIcycles[i]; double max_ms = 1000.0 * maxcyc / countsPerSec; fprintf(f, " %-40s", APInames[i]); // API name fprintf(f, " %8u %9.1f ms", calls, millis); // #calls, total time fprintf(f, " %8.1f ms %8.1f ns", max_ms, 1000000.0 * millis / calls); // max, avg time fprintf(f, " %5.1f%%\n", 100.0 * millis / shownMillis); // % of total #ifdef DEBUG checkedCalls += m_total.m_perClrAPIcalls[i]; checkedMillis += millis; #endif } } #ifdef DEBUG assert(checkedCalls == shownCalls); assert(checkedMillis == shownMillis); #endif if (shownCalls > 0 || shownMillis > 0) { fprintf(f, " -------------------------"); fprintf(f, "---------------------------------------------------------------------------\n"); fprintf(f, " Total for calls shown above %8u %10.1f ms", shownCalls, shownMillis); if (totTime_ms > 0.0) fprintf(f, " (%4.1lf%% of overall JIT time)", shownMillis * 100.0 / totTime_ms); fprintf(f, "\n"); } fprintf(f, "\n"); } #endif fprintf(f, "\n"); } JitTimer::JitTimer(unsigned byteCodeSize) : m_info(byteCodeSize) { #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; m_CLRcallCycles = 0; #endif #ifdef DEBUG m_lastPhase = (Phases)-1; #if MEASURE_CLRAPI_CALLS m_CLRcallAPInum = -1; #endif #endif unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { m_start = threadCurCycles; m_curPhaseStart = threadCurCycles; } } void JitTimer::EndPhase(Compiler* compiler, Phases phase) { // Otherwise... // We re-run some phases currently, so this following assert doesn't work. // assert((int)phase > (int)m_lastPhase); // We should end phases in increasing order. unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { unsigned __int64 phaseCycles = (threadCurCycles - m_curPhaseStart); // If this is not a leaf phase, the assumption is that the last subphase must have just recently ended. // Credit the duration to "slop", the total of which should be very small. if (PhaseHasChildren[phase]) { m_info.m_parentPhaseEndSlop += phaseCycles; } else { // It is a leaf phase. Credit duration to it. m_info.m_invokesByPhase[phase]++; m_info.m_cyclesByPhase[phase] += phaseCycles; #if MEASURE_CLRAPI_CALLS // Record the CLR API timing info as well. m_info.m_CLRinvokesByPhase[phase] += m_CLRcallInvokes; m_info.m_CLRcyclesByPhase[phase] += m_CLRcallCycles; #endif // Credit the phase's ancestors, if any. int ancPhase = PhaseParent[phase]; while (ancPhase != -1) { m_info.m_cyclesByPhase[ancPhase] += phaseCycles; ancPhase = PhaseParent[ancPhase]; } #if MEASURE_CLRAPI_CALLS const Phases lastPhase = PHASE_CLR_API; #else const Phases lastPhase = PHASE_NUMBER_OF; #endif if (phase + 1 == lastPhase) { m_info.m_totalCycles = (threadCurCycles - m_start); } else { m_curPhaseStart = threadCurCycles; } } if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[phase]) { m_info.m_nodeCountAfterPhase[phase] = compiler->fgMeasureIR(); } else { m_info.m_nodeCountAfterPhase[phase] = 0; } } #ifdef DEBUG m_lastPhase = phase; #endif #if MEASURE_CLRAPI_CALLS m_CLRcallInvokes = 0; m_CLRcallCycles = 0; #endif } #if MEASURE_CLRAPI_CALLS //------------------------------------------------------------------------ // JitTimer::CLRApiCallEnter: Start the stopwatch for an EE call. // // Arguments: // apix - The API index - an "enum API_ICorJitInfo_Names" value. 
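//
// Notes:
//    Calls may not nest: CLRApiCallLeave must be called for this API before the
//    next CLRApiCallEnter (enforced by the asserts on m_CLRcallAPInum).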
// void JitTimer::CLRApiCallEnter(unsigned apix) { assert(m_CLRcallAPInum == -1); // Nested calls not allowed m_CLRcallAPInum = apix; // If we can't get the cycles, we'll just ignore this call if (!_our_GetThreadCycles(&m_CLRcallStart)) m_CLRcallStart = 0; } //------------------------------------------------------------------------ // JitTimer::CLRApiCallLeave: compute / record time spent in an EE call. // // Arguments: // apix - The API's "enum API_ICorJitInfo_Names" value; this value // should match the value passed to the most recent call to // "CLRApiCallEnter" (i.e. these must come as matched pairs), // and they also may not nest. // void JitTimer::CLRApiCallLeave(unsigned apix) { // Make sure we're actually inside a measured CLR call. assert(m_CLRcallAPInum != -1); m_CLRcallAPInum = -1; // Ignore this one if we don't have a valid starting counter. if (m_CLRcallStart != 0) { if (JitConfig.JitEECallTimingInfo() != 0) { unsigned __int64 threadCurCycles; if (_our_GetThreadCycles(&threadCurCycles)) { // Compute the cycles spent in the call. threadCurCycles -= m_CLRcallStart; // Add the cycles to the 'phase' and bump its use count. m_info.m_cyclesByPhase[PHASE_CLR_API] += threadCurCycles; m_info.m_invokesByPhase[PHASE_CLR_API] += 1; // Add the values to the "per API" info. m_info.m_allClrAPIcycles += threadCurCycles; m_info.m_allClrAPIcalls += 1; m_info.m_perClrAPIcalls[apix] += 1; m_info.m_perClrAPIcycles[apix] += threadCurCycles; m_info.m_maxClrAPIcycles[apix] = max(m_info.m_maxClrAPIcycles[apix], (unsigned __int32)threadCurCycles); // Subtract the cycles from the enclosing phase by bumping its start time m_curPhaseStart += threadCurCycles; // Update the running totals. m_CLRcallInvokes += 1; m_CLRcallCycles += threadCurCycles; } } m_CLRcallStart = 0; } assert(m_CLRcallAPInum != -1); // No longer in this API call. m_CLRcallAPInum = -1; } #endif // MEASURE_CLRAPI_CALLS CritSecObject JitTimer::s_csvLock; // It's expensive to constantly open and close the file, so open it once and close it // when the process exits. This should be accessed under the s_csvLock. FILE* JitTimer::s_csvFile = nullptr; LPCWSTR Compiler::JitTimeLogCsv() { LPCWSTR jitTimeLogCsv = JitConfig.JitTimeLogCsv(); return jitTimeLogCsv; } void JitTimer::PrintCsvHeader() { LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); if (jitTimeLogCsv == nullptr) { return; } CritSecHolder csvLock(s_csvLock); if (s_csvFile == nullptr) { s_csvFile = _wfopen(jitTimeLogCsv, W("a")); } if (s_csvFile != nullptr) { // Seek to the end of the file s.t. 
`ftell` doesn't lie to us on Windows fseek(s_csvFile, 0, SEEK_END); // Write the header if the file is empty if (ftell(s_csvFile) == 0) { fprintf(s_csvFile, "\"Method Name\","); fprintf(s_csvFile, "\"Assembly or SPMI Index\","); fprintf(s_csvFile, "\"IL Bytes\","); fprintf(s_csvFile, "\"Basic Blocks\","); fprintf(s_csvFile, "\"Min Opts\","); fprintf(s_csvFile, "\"Loops\","); fprintf(s_csvFile, "\"Loops Cloned\","); #if FEATURE_LOOP_ALIGN #ifdef DEBUG fprintf(s_csvFile, "\"Alignment Candidates\","); fprintf(s_csvFile, "\"Loops Aligned\","); #endif // DEBUG #endif // FEATURE_LOOP_ALIGN for (int i = 0; i < PHASE_NUMBER_OF; i++) { fprintf(s_csvFile, "\"%s\",", PhaseNames[i]); if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) { fprintf(s_csvFile, "\"Node Count After %s\",", PhaseNames[i]); } } InlineStrategy::DumpCsvHeader(s_csvFile); fprintf(s_csvFile, "\"Executable Code Bytes\","); fprintf(s_csvFile, "\"GC Info Bytes\","); fprintf(s_csvFile, "\"Total Bytes Allocated\","); fprintf(s_csvFile, "\"Total Cycles\","); fprintf(s_csvFile, "\"CPS\"\n"); fflush(s_csvFile); } } } void JitTimer::PrintCsvMethodStats(Compiler* comp) { LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); if (jitTimeLogCsv == nullptr) { return; } // eeGetMethodFullName uses locks, so don't enter crit sec before this call. #if defined(DEBUG) || defined(LATE_DISASM) // If we already have computed the name because for some reason we're generating the CSV // for a DEBUG build (presumably not for the time info), just re-use it. const char* methName = comp->info.compFullName; #else const char* methName = comp->eeGetMethodFullName(comp->info.compMethodHnd); #endif // Try and access the SPMI index to report in the data set. // // If the jit is not hosted under SPMI this will return the // default value of zero. // // Query the jit host directly here instead of going via the // config cache, since value will change for each method. 
int index = g_jitHost->getIntConfigValue(W("SuperPMIMethodContextNumber"), -1); CritSecHolder csvLock(s_csvLock); if (s_csvFile == nullptr) { return; } fprintf(s_csvFile, "\"%s\",", methName); if (index != 0) { fprintf(s_csvFile, "%d,", index); } else { const char* methodAssemblyName = comp->info.compCompHnd->getAssemblyName( comp->info.compCompHnd->getModuleAssembly(comp->info.compCompHnd->getClassModule(comp->info.compClassHnd))); fprintf(s_csvFile, "\"%s\",", methodAssemblyName); } fprintf(s_csvFile, "%u,", comp->info.compILCodeSize); fprintf(s_csvFile, "%u,", comp->fgBBcount); fprintf(s_csvFile, "%u,", comp->opts.MinOpts()); fprintf(s_csvFile, "%u,", comp->optLoopCount); fprintf(s_csvFile, "%u,", comp->optLoopsCloned); #if FEATURE_LOOP_ALIGN #ifdef DEBUG fprintf(s_csvFile, "%u,", comp->loopAlignCandidates); fprintf(s_csvFile, "%u,", comp->loopsAligned); #endif // DEBUG #endif // FEATURE_LOOP_ALIGN unsigned __int64 totCycles = 0; for (int i = 0; i < PHASE_NUMBER_OF; i++) { if (!PhaseHasChildren[i]) { totCycles += m_info.m_cyclesByPhase[i]; } fprintf(s_csvFile, "%I64u,", m_info.m_cyclesByPhase[i]); if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) { fprintf(s_csvFile, "%u,", m_info.m_nodeCountAfterPhase[i]); } } comp->m_inlineStrategy->DumpCsvData(s_csvFile); fprintf(s_csvFile, "%u,", comp->info.compNativeCodeSize); fprintf(s_csvFile, "%Iu,", comp->compInfoBlkSize); fprintf(s_csvFile, "%Iu,", comp->compGetArenaAllocator()->getTotalBytesAllocated()); fprintf(s_csvFile, "%I64u,", m_info.m_totalCycles); fprintf(s_csvFile, "%f\n", CachedCyclesPerSecond()); fflush(s_csvFile); } // Perform process shutdown actions. // // static void JitTimer::Shutdown() { CritSecHolder csvLock(s_csvLock); if (s_csvFile != nullptr) { fclose(s_csvFile); } } // Completes the timing of the current method, and adds it to "sum". void JitTimer::Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases) { if (includePhases) { PrintCsvMethodStats(comp); } sum.AddInfo(m_info, includePhases); } #endif // FEATURE_JIT_METHOD_PERF #if LOOP_HOIST_STATS // Static fields. CritSecObject Compiler::s_loopHoistStatsLock; // Default constructor. unsigned Compiler::s_loopsConsidered = 0; unsigned Compiler::s_loopsWithHoistedExpressions = 0; unsigned Compiler::s_totalHoistedExpressions = 0; // static void Compiler::PrintAggregateLoopHoistStats(FILE* f) { fprintf(f, "\n"); fprintf(f, "---------------------------------------------------\n"); fprintf(f, "Loop hoisting stats\n"); fprintf(f, "---------------------------------------------------\n"); double pctWithHoisted = 0.0; if (s_loopsConsidered > 0) { pctWithHoisted = 100.0 * (double(s_loopsWithHoistedExpressions) / double(s_loopsConsidered)); } double exprsPerLoopWithExpr = 0.0; if (s_loopsWithHoistedExpressions > 0) { exprsPerLoopWithExpr = double(s_totalHoistedExpressions) / double(s_loopsWithHoistedExpressions); } fprintf(f, "Considered %d loops. 
Of these, we hoisted expressions out of %d (%6.2f%%).\n", s_loopsConsidered, s_loopsWithHoistedExpressions, pctWithHoisted); fprintf(f, " A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n", s_totalHoistedExpressions, exprsPerLoopWithExpr); } void Compiler::AddLoopHoistStats() { CritSecHolder statsLock(s_loopHoistStatsLock); s_loopsConsidered += m_loopsConsidered; s_loopsWithHoistedExpressions += m_loopsWithHoistedExpressions; s_totalHoistedExpressions += m_totalHoistedExpressions; } void Compiler::PrintPerMethodLoopHoistStats() { double pctWithHoisted = 0.0; if (m_loopsConsidered > 0) { pctWithHoisted = 100.0 * (double(m_loopsWithHoistedExpressions) / double(m_loopsConsidered)); } double exprsPerLoopWithExpr = 0.0; if (m_loopsWithHoistedExpressions > 0) { exprsPerLoopWithExpr = double(m_totalHoistedExpressions) / double(m_loopsWithHoistedExpressions); } printf("Considered %d loops. Of these, we hoisted expressions out of %d (%5.2f%%).\n", m_loopsConsidered, m_loopsWithHoistedExpressions, pctWithHoisted); printf(" A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n", m_totalHoistedExpressions, exprsPerLoopWithExpr); } #endif // LOOP_HOIST_STATS //------------------------------------------------------------------------ // RecordStateAtEndOfInlining: capture timing data (if enabled) after // inlining as completed. // // Note: // Records data needed for SQM and inlining data dumps. Should be // called after inlining is complete. (We do this after inlining // because this marks the last point at which the JIT is likely to // cause type-loading and class initialization). void Compiler::RecordStateAtEndOfInlining() { #if defined(DEBUG) || defined(INLINE_DATA) m_compCyclesAtEndOfInlining = 0; m_compTickCountAtEndOfInlining = 0; bool b = CycleTimer::GetThreadCyclesS(&m_compCyclesAtEndOfInlining); if (!b) { return; // We don't have a thread cycle counter. } m_compTickCountAtEndOfInlining = GetTickCount(); #endif // defined(DEBUG) || defined(INLINE_DATA) } //------------------------------------------------------------------------ // RecordStateAtEndOfCompilation: capture timing data (if enabled) after // compilation is completed. void Compiler::RecordStateAtEndOfCompilation() { #if defined(DEBUG) || defined(INLINE_DATA) // Common portion m_compCycles = 0; unsigned __int64 compCyclesAtEnd; bool b = CycleTimer::GetThreadCyclesS(&compCyclesAtEnd); if (!b) { return; // We don't have a thread cycle counter. } assert(compCyclesAtEnd >= m_compCyclesAtEndOfInlining); m_compCycles = compCyclesAtEnd - m_compCyclesAtEndOfInlining; #endif // defined(DEBUG) || defined(INLINE_DATA) } #if FUNC_INFO_LOGGING // static LPCWSTR Compiler::compJitFuncInfoFilename = nullptr; // static FILE* Compiler::compJitFuncInfoFile = nullptr; #endif // FUNC_INFO_LOGGING #ifdef DEBUG // dumpConvertedVarSet() dumps the varset bits that are tracked // variable indices, and we convert them to variable numbers, sort the variable numbers, and // print them as variable numbers. To do this, we use a temporary set indexed by // variable number. We can't use the "all varset" type because it is still size-limited, and might // not be big enough to handle all possible variable numbers. void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars) { BYTE* pVarNumSet; // trivial set: one byte per varNum, 0 means not in set, 1 means in set. 
size_t varNumSetBytes = comp->lvaCount * sizeof(BYTE); pVarNumSet = (BYTE*)_alloca(varNumSetBytes); memset(pVarNumSet, 0, varNumSetBytes); // empty the set VarSetOps::Iter iter(comp, vars); unsigned varIndex = 0; while (iter.NextElem(&varIndex)) { unsigned varNum = comp->lvaTrackedIndexToLclNum(varIndex); pVarNumSet[varNum] = 1; // This varNum is in the set } bool first = true; printf("{"); for (size_t varNum = 0; varNum < comp->lvaCount; varNum++) { if (pVarNumSet[varNum] == 1) { if (!first) { printf(" "); } printf("V%02u", varNum); first = false; } } printf("}"); } /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Debugging helpers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ /* The following functions are intended to be called from the debugger, to dump * various data structures. * * The versions that start with 'c' take a Compiler* as the first argument. * The versions that start with 'd' use the tlsCompiler, so don't require a Compiler*. * * Summary: * cBlock, dBlock : Display a basic block (call fgTableDispBasicBlock()). * cBlocks, dBlocks : Display all the basic blocks of a function (call fgDispBasicBlocks()). * cBlocksV, dBlocksV : Display all the basic blocks of a function (call fgDispBasicBlocks(true)). * "V" means "verbose", and will dump all the trees. * cStmt, dStmt : Display a Statement (call gtDispStmt()). * cTree, dTree : Display a tree (call gtDispTree()). * cTreeLIR, dTreeLIR : Display a tree in LIR form (call gtDispLIRNode()). * cTrees, dTrees : Display all the trees in a function (call fgDumpTrees()). * cEH, dEH : Display the EH handler table (call fgDispHandlerTab()). * cVar, dVar : Display a local variable given its number (call lvaDumpEntry()). * cVarDsc, dVarDsc : Display a local variable given a LclVarDsc* (call lvaDumpEntry()). * cVars, dVars : Display the local variable table (call lvaTableDump()). * cVarsFinal, dVarsFinal : Display the local variable table (call lvaTableDump(FINAL_FRAME_LAYOUT)). * cBlockCheapPreds, dBlockCheapPreds : Display a block's cheap predecessors (call block->dspCheapPreds()). * cBlockPreds, dBlockPreds : Display a block's predecessors (call block->dspPreds()). * cBlockSuccs, dBlockSuccs : Display a block's successors (call block->dspSuccs(compiler)). * cReach, dReach : Display all block reachability (call fgDispReach()). * cDoms, dDoms : Display all block dominators (call fgDispDoms()). * cLiveness, dLiveness : Display per-block variable liveness (call fgDispBBLiveness()). * cCVarSet, dCVarSet : Display a "converted" VARSET_TP: the varset is assumed to be tracked variable * indices. These are converted to variable numbers and sorted. (Calls * dumpConvertedVarSet()). * cLoop, dLoop : Display the blocks of a loop, including the trees. * cTreeFlags, dTreeFlags : Display tree flags * * The following don't require a Compiler* to work: * dRegMask : Display a regMaskTP (call dspRegMask(mask)). * dBlockList : Display a BasicBlockList*. 
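 *
 * Illustrative usage (a sketch of a typical debugger session, not code that ships in the JIT):
 * with the debugger stopped inside the JIT, the "d" variants can be evaluated directly from a
 * watch or immediate window, for example:
 *
 *      dBlocks()        - list the basic blocks of the method being compiled
 *      dFindBlock(3)    - locate the block whose bbNum is 3 (also sets dbBlock)
 *      dBlock(dbBlock)  - dump that block
 *      dFindTree(42)    - locate the tree whose gtTreeID is 42 (also sets dbTree/dbTreeBlock)
 *      dTree(dbTree)    - dump that tree
 *      dVar(2)          - dump the local variable table entry for V02
 *
 * The ids above (3, 42, V02) are made-up values for illustration. The "c" variants behave the
 * same but take an explicit Compiler*, for when JitTls::GetCompiler() is not the instance of
 * interest.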
*/ void cBlock(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Block %u\n", sequenceNumber++); comp->fgTableDispBasicBlock(block); } void cBlocks(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Blocks %u\n", sequenceNumber++); comp->fgDispBasicBlocks(); } void cBlocksV(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlocksV %u\n", sequenceNumber++); comp->fgDispBasicBlocks(true); } void cStmt(Compiler* comp, Statement* statement) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Stmt %u\n", sequenceNumber++); comp->gtDispStmt(statement, ">>>"); } void cTree(Compiler* comp, GenTree* tree) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Tree %u\n", sequenceNumber++); comp->gtDispTree(tree, nullptr, ">>>"); } void cTreeLIR(Compiler* comp, GenTree* tree) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *TreeLIR %u\n", sequenceNumber++); comp->gtDispLIRNode(tree); } void cTrees(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Trees %u\n", sequenceNumber++); comp->fgDumpTrees(comp->fgFirstBB, nullptr); } void cEH(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *EH %u\n", sequenceNumber++); comp->fgDispHandlerTab(); } void cVar(Compiler* comp, unsigned lclNum) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Var %u\n", sequenceNumber++); comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); } void cVarDsc(Compiler* comp, LclVarDsc* varDsc) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *VarDsc %u\n", sequenceNumber++); unsigned lclNum = comp->lvaGetLclNum(varDsc); comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); } void cVars(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Vars %u\n", sequenceNumber++); comp->lvaTableDump(); } void cVarsFinal(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called 
printf("===================================================================== *Vars %u\n", sequenceNumber++); comp->lvaTableDump(Compiler::FINAL_FRAME_LAYOUT); } void cBlockCheapPreds(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockCheapPreds %u\n", sequenceNumber++); block->dspCheapPreds(); } void cBlockPreds(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockPreds %u\n", sequenceNumber++); block->dspPreds(); } void cBlockSuccs(Compiler* comp, BasicBlock* block) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *BlockSuccs %u\n", sequenceNumber++); block->dspSuccs(comp); } void cReach(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Reach %u\n", sequenceNumber++); comp->fgDispReach(); } void cDoms(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Doms %u\n", sequenceNumber++); comp->fgDispDoms(); } void cLiveness(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Liveness %u\n", sequenceNumber++); comp->fgDispBBLiveness(); } void cCVarSet(Compiler* comp, VARSET_VALARG_TP vars) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *CVarSet %u\n", sequenceNumber++); dumpConvertedVarSet(comp, vars); printf("\n"); // dumpConvertedVarSet() doesn't emit a trailing newline } void cLoop(Compiler* comp, unsigned loopNum) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Loop %u\n", sequenceNumber++); comp->optPrintLoopInfo(loopNum, /* verbose */ true); printf("\n"); } void cLoopPtr(Compiler* comp, const Compiler::LoopDsc* loop) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *LoopPtr %u\n", sequenceNumber++); comp->optPrintLoopInfo(loop, /* verbose */ true); printf("\n"); } void cLoops(Compiler* comp) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== *Loops %u\n", sequenceNumber++); comp->optPrintLoopTable(); } void dBlock(BasicBlock* block) { cBlock(JitTls::GetCompiler(), block); } void dBlocks() { cBlocks(JitTls::GetCompiler()); } void dBlocksV() { cBlocksV(JitTls::GetCompiler()); } void dStmt(Statement* statement) { cStmt(JitTls::GetCompiler(), statement); } void dTree(GenTree* tree) { 
cTree(JitTls::GetCompiler(), tree); } void dTreeLIR(GenTree* tree) { cTreeLIR(JitTls::GetCompiler(), tree); } void dTreeRange(GenTree* first, GenTree* last) { Compiler* comp = JitTls::GetCompiler(); GenTree* cur = first; while (true) { cTreeLIR(comp, cur); if (cur == last) break; cur = cur->gtNext; } } void dTrees() { cTrees(JitTls::GetCompiler()); } void dEH() { cEH(JitTls::GetCompiler()); } void dVar(unsigned lclNum) { cVar(JitTls::GetCompiler(), lclNum); } void dVarDsc(LclVarDsc* varDsc) { cVarDsc(JitTls::GetCompiler(), varDsc); } void dVars() { cVars(JitTls::GetCompiler()); } void dVarsFinal() { cVarsFinal(JitTls::GetCompiler()); } void dBlockPreds(BasicBlock* block) { cBlockPreds(JitTls::GetCompiler(), block); } void dBlockCheapPreds(BasicBlock* block) { cBlockCheapPreds(JitTls::GetCompiler(), block); } void dBlockSuccs(BasicBlock* block) { cBlockSuccs(JitTls::GetCompiler(), block); } void dReach() { cReach(JitTls::GetCompiler()); } void dDoms() { cDoms(JitTls::GetCompiler()); } void dLiveness() { cLiveness(JitTls::GetCompiler()); } void dCVarSet(VARSET_VALARG_TP vars) { cCVarSet(JitTls::GetCompiler(), vars); } void dLoop(unsigned loopNum) { cLoop(JitTls::GetCompiler(), loopNum); } void dLoopPtr(const Compiler::LoopDsc* loop) { cLoopPtr(JitTls::GetCompiler(), loop); } void dLoops() { cLoops(JitTls::GetCompiler()); } void dRegMask(regMaskTP mask) { static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called printf("===================================================================== dRegMask %u\n", sequenceNumber++); dspRegMask(mask); printf("\n"); // dspRegMask() doesn't emit a trailing newline } void dBlockList(BasicBlockList* list) { printf("WorkList: "); while (list != nullptr) { printf(FMT_BB " ", list->block->bbNum); list = list->next; } printf("\n"); } // Global variables available in debug mode. That are set by debug APIs for finding // Trees, Stmts, and/or Blocks using id or bbNum. // That can be used in watch window or as a way to get address of fields for data break points. GenTree* dbTree; Statement* dbStmt; BasicBlock* dbTreeBlock; BasicBlock* dbBlock; // Debug APIs for finding Trees, Stmts, and/or Blocks. // As a side effect, they set the debug variables above. GenTree* dFindTree(GenTree* tree, unsigned id) { if (tree == nullptr) { return nullptr; } if (tree->gtTreeID == id) { dbTree = tree; return tree; } GenTree* child = nullptr; tree->VisitOperands([&child, id](GenTree* operand) -> GenTree::VisitResult { child = dFindTree(child, id); return (child != nullptr) ? 
GenTree::VisitResult::Abort : GenTree::VisitResult::Continue; }); return child; } GenTree* dFindTree(unsigned id) { Compiler* comp = JitTls::GetCompiler(); GenTree* tree; dbTreeBlock = nullptr; dbTree = nullptr; for (BasicBlock* const block : comp->Blocks()) { for (Statement* const stmt : block->Statements()) { tree = dFindTree(stmt->GetRootNode(), id); if (tree != nullptr) { dbTreeBlock = block; return tree; } } } return nullptr; } Statement* dFindStmt(unsigned id) { Compiler* comp = JitTls::GetCompiler(); dbStmt = nullptr; unsigned stmtId = 0; for (BasicBlock* const block : comp->Blocks()) { for (Statement* const stmt : block->Statements()) { stmtId++; if (stmtId == id) { dbStmt = stmt; return stmt; } } } return nullptr; } BasicBlock* dFindBlock(unsigned bbNum) { Compiler* comp = JitTls::GetCompiler(); BasicBlock* block = nullptr; dbBlock = nullptr; for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext) { if (block->bbNum == bbNum) { dbBlock = block; break; } } return block; } Compiler::LoopDsc* dFindLoop(unsigned loopNum) { Compiler* comp = JitTls::GetCompiler(); if (loopNum >= comp->optLoopCount) { printf("loopNum %u out of range\n"); return nullptr; } return &comp->optLoopTable[loopNum]; } void cTreeFlags(Compiler* comp, GenTree* tree) { int chars = 0; if (tree->gtFlags != 0) { chars += printf("flags="); // Node flags CLANG_FORMAT_COMMENT_ANCHOR; #if defined(DEBUG) if (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) { chars += printf("[NODE_LARGE]"); } if (tree->gtDebugFlags & GTF_DEBUG_NODE_SMALL) { chars += printf("[NODE_SMALL]"); } if (tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) { chars += printf("[MORPHED]"); } #endif // defined(DEBUG) if (tree->gtFlags & GTF_COLON_COND) { chars += printf("[COLON_COND]"); } // Operator flags genTreeOps op = tree->OperGet(); switch (op) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: case GT_LCL_FLD: case GT_LCL_FLD_ADDR: case GT_STORE_LCL_FLD: case GT_STORE_LCL_VAR: if (tree->gtFlags & GTF_VAR_DEF) { chars += printf("[VAR_DEF]"); } if (tree->gtFlags & GTF_VAR_USEASG) { chars += printf("[VAR_USEASG]"); } if (tree->gtFlags & GTF_VAR_CAST) { chars += printf("[VAR_CAST]"); } if (tree->gtFlags & GTF_VAR_ITERATOR) { chars += printf("[VAR_ITERATOR]"); } if (tree->gtFlags & GTF_VAR_CLONED) { chars += printf("[VAR_CLONED]"); } if (tree->gtFlags & GTF_VAR_DEATH) { chars += printf("[VAR_DEATH]"); } if (tree->gtFlags & GTF_VAR_ARR_INDEX) { chars += printf("[VAR_ARR_INDEX]"); } #if defined(DEBUG) if (tree->gtDebugFlags & GTF_DEBUG_VAR_CSE_REF) { chars += printf("[VAR_CSE_REF]"); } #endif break; case GT_NO_OP: break; case GT_FIELD: if (tree->gtFlags & GTF_FLD_VOLATILE) { chars += printf("[FLD_VOLATILE]"); } break; case GT_INDEX: if (tree->gtFlags & GTF_INX_STRING_LAYOUT) { chars += printf("[INX_STRING_LAYOUT]"); } FALLTHROUGH; case GT_INDEX_ADDR: if (tree->gtFlags & GTF_INX_RNGCHK) { chars += printf("[INX_RNGCHK]"); } break; case GT_IND: case GT_STOREIND: if (tree->gtFlags & GTF_IND_VOLATILE) { chars += printf("[IND_VOLATILE]"); } if (tree->gtFlags & GTF_IND_TGTANYWHERE) { chars += printf("[IND_TGTANYWHERE]"); } if (tree->gtFlags & GTF_IND_TGT_NOT_HEAP) { chars += printf("[IND_TGT_NOT_HEAP]"); } if (tree->gtFlags & GTF_IND_TLS_REF) { chars += printf("[IND_TLS_REF]"); } if (tree->gtFlags & GTF_IND_ASG_LHS) { chars += printf("[IND_ASG_LHS]"); } if (tree->gtFlags & GTF_IND_UNALIGNED) { chars += printf("[IND_UNALIGNED]"); } if (tree->gtFlags & GTF_IND_INVARIANT) { chars += printf("[IND_INVARIANT]"); } if (tree->gtFlags & GTF_IND_NONNULL) { chars += 
printf("[IND_NONNULL]"); } break; case GT_CLS_VAR: if (tree->gtFlags & GTF_CLS_VAR_ASG_LHS) { chars += printf("[CLS_VAR_ASG_LHS]"); } break; case GT_MUL: #if !defined(TARGET_64BIT) case GT_MUL_LONG: #endif if (tree->gtFlags & GTF_MUL_64RSLT) { chars += printf("[64RSLT]"); } if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_ADD: if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_LSH: if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) { chars += printf("[ADDRMODE_NO_CSE]"); } break; case GT_MOD: case GT_UMOD: break; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GT: case GT_GE: if (tree->gtFlags & GTF_RELOP_NAN_UN) { chars += printf("[RELOP_NAN_UN]"); } if (tree->gtFlags & GTF_RELOP_JMP_USED) { chars += printf("[RELOP_JMP_USED]"); } break; case GT_QMARK: if (tree->gtFlags & GTF_QMARK_CAST_INSTOF) { chars += printf("[QMARK_CAST_INSTOF]"); } break; case GT_BOX: if (tree->gtFlags & GTF_BOX_VALUE) { chars += printf("[BOX_VALUE]"); } break; case GT_CNS_INT: { GenTreeFlags handleKind = (tree->gtFlags & GTF_ICON_HDL_MASK); switch (handleKind) { case GTF_ICON_SCOPE_HDL: chars += printf("[ICON_SCOPE_HDL]"); break; case GTF_ICON_CLASS_HDL: chars += printf("[ICON_CLASS_HDL]"); break; case GTF_ICON_METHOD_HDL: chars += printf("[ICON_METHOD_HDL]"); break; case GTF_ICON_FIELD_HDL: chars += printf("[ICON_FIELD_HDL]"); break; case GTF_ICON_STATIC_HDL: chars += printf("[ICON_STATIC_HDL]"); break; case GTF_ICON_STR_HDL: chars += printf("[ICON_STR_HDL]"); break; case GTF_ICON_CONST_PTR: chars += printf("[ICON_CONST_PTR]"); break; case GTF_ICON_GLOBAL_PTR: chars += printf("[ICON_GLOBAL_PTR]"); break; case GTF_ICON_VARG_HDL: chars += printf("[ICON_VARG_HDL]"); break; case GTF_ICON_PINVKI_HDL: chars += printf("[ICON_PINVKI_HDL]"); break; case GTF_ICON_TOKEN_HDL: chars += printf("[ICON_TOKEN_HDL]"); break; case GTF_ICON_TLS_HDL: chars += printf("[ICON_TLD_HDL]"); break; case GTF_ICON_FTN_ADDR: chars += printf("[ICON_FTN_ADDR]"); break; case GTF_ICON_CIDMID_HDL: chars += printf("[ICON_CIDMID_HDL]"); break; case GTF_ICON_BBC_PTR: chars += printf("[ICON_BBC_PTR]"); break; case GTF_ICON_STATIC_BOX_PTR: chars += printf("[GTF_ICON_STATIC_BOX_PTR]"); break; case GTF_ICON_FIELD_OFF: chars += printf("[ICON_FIELD_OFF]"); break; default: assert(!"a forgotten handle flag"); break; } } break; case GT_OBJ: case GT_STORE_OBJ: if (tree->AsObj()->GetLayout()->HasGCPtr()) { chars += printf("[BLK_HASGCPTR]"); } FALLTHROUGH; case GT_BLK: case GT_STORE_BLK: case GT_STORE_DYN_BLK: if (tree->gtFlags & GTF_BLK_VOLATILE) { chars += printf("[BLK_VOLATILE]"); } if (tree->AsBlk()->IsUnaligned()) { chars += printf("[BLK_UNALIGNED]"); } break; case GT_CALL: if (tree->gtFlags & GTF_CALL_UNMANAGED) { chars += printf("[CALL_UNMANAGED]"); } if (tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) { chars += printf("[CALL_INLINE_CANDIDATE]"); } if (!tree->AsCall()->IsVirtual()) { chars += printf("[CALL_NONVIRT]"); } if (tree->AsCall()->IsVirtualVtable()) { chars += printf("[CALL_VIRT_VTABLE]"); } if (tree->AsCall()->IsVirtualStub()) { chars += printf("[CALL_VIRT_STUB]"); } if (tree->gtFlags & GTF_CALL_NULLCHECK) { chars += printf("[CALL_NULLCHECK]"); } if (tree->gtFlags & GTF_CALL_POP_ARGS) { chars += printf("[CALL_POP_ARGS]"); } if (tree->gtFlags & GTF_CALL_HOISTABLE) { chars += printf("[CALL_HOISTABLE]"); } // More flags associated with calls. 
{ GenTreeCall* call = tree->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) { chars += printf("[CALL_M_EXPLICIT_TAILCALL]"); } if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL) { chars += printf("[CALL_M_TAILCALL]"); } if (call->gtCallMoreFlags & GTF_CALL_M_VARARGS) { chars += printf("[CALL_M_VARARGS]"); } if (call->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) { chars += printf("[CALL_M_RETBUFFARG]"); } if (call->gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) { chars += printf("[CALL_M_DELEGATE_INV]"); } if (call->gtCallMoreFlags & GTF_CALL_M_NOGCCHECK) { chars += printf("[CALL_M_NOGCCHECK]"); } if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { chars += printf("[CALL_M_SPECIAL_INTRINSIC]"); } if (call->IsUnmanaged()) { if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { chars += printf("[CALL_M_UNMGD_THISCALL]"); } } else if (call->IsVirtualStub()) { if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) { chars += printf("[CALL_M_VIRTSTUB_REL_INDIRECT]"); } } else if (!call->IsVirtual()) { if (call->gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) { chars += printf("[CALL_M_NONVIRT_SAME_THIS]"); } } if (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH) { chars += printf("[CALL_M_FRAME_VAR_DEATH]"); } if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER) { chars += printf("[CALL_M_TAILCALL_VIA_JIT_HELPER]"); } #if FEATURE_TAILCALL_OPT if (call->gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) { chars += printf("[CALL_M_IMPLICIT_TAILCALL]"); } #endif if (call->gtCallMoreFlags & GTF_CALL_M_PINVOKE) { chars += printf("[CALL_M_PINVOKE]"); } if (call->IsFatPointerCandidate()) { chars += printf("[CALL_FAT_POINTER_CANDIDATE]"); } if (call->IsGuarded()) { chars += printf("[CALL_GUARDED]"); } if (call->IsExpRuntimeLookup()) { chars += printf("[CALL_EXP_RUNTIME_LOOKUP]"); } } break; default: { GenTreeFlags flags = (tree->gtFlags & (~(GTF_COMMON_MASK | GTF_OVERFLOW))); if (flags != 0) { chars += printf("[%08X]", flags); } } break; } // Common flags. 
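        // These are not specific to a particular oper; for example, a node that contains an
        // embedded assignment is tagged [ASG] and one that contains a call beneath it [CALL].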
if (tree->gtFlags & GTF_ASG) { chars += printf("[ASG]"); } if (tree->gtFlags & GTF_CALL) { chars += printf("[CALL]"); } switch (op) { case GT_MUL: case GT_CAST: case GT_ADD: case GT_SUB: if (tree->gtFlags & GTF_OVERFLOW) { chars += printf("[OVERFLOW]"); } break; default: break; } if (tree->gtFlags & GTF_EXCEPT) { chars += printf("[EXCEPT]"); } if (tree->gtFlags & GTF_GLOB_REF) { chars += printf("[GLOB_REF]"); } if (tree->gtFlags & GTF_ORDER_SIDEEFF) { chars += printf("[ORDER_SIDEEFF]"); } if (tree->gtFlags & GTF_REVERSE_OPS) { if (op != GT_LCL_VAR) { chars += printf("[REVERSE_OPS]"); } } if (tree->gtFlags & GTF_SPILLED) { chars += printf("[SPILLED_OPER]"); } #if FEATURE_SET_FLAGS if (tree->gtFlags & GTF_SET_FLAGS) { if ((op != GT_IND) && (op != GT_STOREIND)) { chars += printf("[ZSF_SET_FLAGS]"); } } #endif if (tree->gtFlags & GTF_IND_NONFAULTING) { if (tree->OperIsIndirOrArrLength()) { chars += printf("[IND_NONFAULTING]"); } } if (tree->gtFlags & GTF_MAKE_CSE) { chars += printf("[MAKE_CSE]"); } if (tree->gtFlags & GTF_DONT_CSE) { chars += printf("[DONT_CSE]"); } if (tree->gtFlags & GTF_BOOLEAN) { chars += printf("[BOOLEAN]"); } if (tree->gtFlags & GTF_UNSIGNED) { chars += printf("[SMALL_UNSIGNED]"); } if (tree->gtFlags & GTF_LATE_ARG) { chars += printf("[SMALL_LATE_ARG]"); } if (tree->gtFlags & GTF_SPILL) { chars += printf("[SPILL]"); } if (tree->gtFlags & GTF_REUSE_REG_VAL) { if (op == GT_CNS_INT) { chars += printf("[REUSE_REG_VAL]"); } } } } void dTreeFlags(GenTree* tree) { cTreeFlags(JitTls::GetCompiler(), tree); } #endif // DEBUG #if VARSET_COUNTOPS // static BitSetSupport::BitSetOpCounter Compiler::m_varsetOpCounter("VarSetOpCounts.log"); #endif #if ALLVARSET_COUNTOPS // static BitSetSupport::BitSetOpCounter Compiler::m_allvarsetOpCounter("AllVarSetOpCounts.log"); #endif // static HelperCallProperties Compiler::s_helperCallProperties; /*****************************************************************************/ /*****************************************************************************/ //------------------------------------------------------------------------ // killGCRefs: // Given some tree node return does it need all GC refs to be spilled from // callee save registers. // // Arguments: // tree - the tree for which we ask about gc refs. // // Return Value: // true - tree kills GC refs on callee save registers // false - tree doesn't affect GC refs on callee save registers bool Compiler::killGCRefs(GenTree* tree) { if (tree->IsCall()) { GenTreeCall* call = tree->AsCall(); if (call->IsUnmanaged()) { return true; } if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_JIT_PINVOKE_BEGIN)) { assert(opts.ShouldUsePInvokeHelpers()); return true; } } else if (tree->OperIs(GT_START_PREEMPTGC)) { return true; } return false; } //------------------------------------------------------------------------ // lvaIsOSRLocal: check if this local var is one that requires special // treatment for OSR compilations. 
// // Arguments: // varNum - variable of interest // // Return Value: // true - this is an OSR compile and this local requires special treatment // false - not an OSR compile, or not an interesting local for OSR bool Compiler::lvaIsOSRLocal(unsigned varNum) { if (!opts.IsOSR()) { return false; } if (varNum < info.compLocalsCount) { return true; } LclVarDsc* varDsc = lvaGetDesc(varNum); if (varDsc->lvIsStructField) { return (varDsc->lvParentLcl < info.compLocalsCount); } return false; } //------------------------------------------------------------------------------ // gtTypeForNullCheck: helper to get the most optimal and correct type for nullcheck // // Arguments: // tree - the node for nullcheck; // var_types Compiler::gtTypeForNullCheck(GenTree* tree) { if (varTypeIsIntegral(tree)) { #if defined(TARGET_XARCH) // Just an optimization for XARCH - smaller mov if (varTypeIsLong(tree)) { return TYP_INT; } #endif return tree->TypeGet(); } // for the rest: probe a single byte to avoid potential AVEs return TYP_BYTE; } //------------------------------------------------------------------------------ // gtChangeOperToNullCheck: helper to change tree oper to a NULLCHECK. // // Arguments: // tree - the node to change; // basicBlock - basic block of the node. // // Notes: // the function should not be called after lowering for platforms that do not support // emitting NULLCHECK nodes, like arm32. Use `Lowering::TransformUnusedIndirection` // that handles it and calls this function when appropriate. // void Compiler::gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block) { assert(tree->OperIs(GT_FIELD, GT_IND, GT_OBJ, GT_BLK)); tree->ChangeOper(GT_NULLCHECK); tree->ChangeType(gtTypeForNullCheck(tree)); block->bbFlags |= BBF_HAS_NULLCHECK; optMethodFlags |= OMF_HAS_NULLCHECK; } #if defined(DEBUG) //------------------------------------------------------------------------------ // devirtualizationDetailToString: describe the detailed devirtualization reason // // Arguments: // detail - detail to describe // // Returns: // descriptive string // const char* Compiler::devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail) { switch (detail) { case CORINFO_DEVIRTUALIZATION_UNKNOWN: return "unknown"; case CORINFO_DEVIRTUALIZATION_SUCCESS: return "success"; case CORINFO_DEVIRTUALIZATION_FAILED_CANON: return "object class was canonical"; case CORINFO_DEVIRTUALIZATION_FAILED_COM: return "object class was com"; case CORINFO_DEVIRTUALIZATION_FAILED_CAST: return "object class could not be cast to interface class"; case CORINFO_DEVIRTUALIZATION_FAILED_LOOKUP: return "interface method could not be found"; case CORINFO_DEVIRTUALIZATION_FAILED_DIM: return "interface method was default interface method"; case CORINFO_DEVIRTUALIZATION_FAILED_SUBCLASS: return "object not subclass of base class"; case CORINFO_DEVIRTUALIZATION_FAILED_SLOT: return "virtual method installed via explicit override"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE: return "devirtualization crossed version bubble"; case CORINFO_DEVIRTUALIZATION_MULTIPLE_IMPL: return "object class has multiple implementations of interface"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_CLASS_DECL: return "decl method is defined on class and decl method not in version bubble, and decl method not in " "type closest to version bubble"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_INTERFACE_DECL: return "decl method is defined on interface and not in version bubble, and implementation type not " "entirely defined in bubble"; case 
CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL: return "object class not defined within version bubble"; case CORINFO_DEVIRTUALIZATION_FAILED_BUBBLE_IMPL_NOT_REFERENCEABLE: return "object class cannot be referenced from R2R code due to missing tokens"; case CORINFO_DEVIRTUALIZATION_FAILED_DUPLICATE_INTERFACE: return "crossgen2 virtual method algorithm and runtime algorithm differ in the presence of duplicate " "interface implementations"; case CORINFO_DEVIRTUALIZATION_FAILED_DECL_NOT_REPRESENTABLE: return "Decl method cannot be represented in R2R image"; default: return "undefined"; } } #endif // defined(DEBUG) #if TRACK_ENREG_STATS Compiler::EnregisterStats Compiler::s_enregisterStats; void Compiler::EnregisterStats::RecordLocal(const LclVarDsc* varDsc) { m_totalNumberOfVars++; if (varDsc->TypeGet() == TYP_STRUCT) { m_totalNumberOfStructVars++; } if (!varDsc->lvDoNotEnregister) { m_totalNumberOfEnregVars++; if (varDsc->TypeGet() == TYP_STRUCT) { m_totalNumberOfStructEnregVars++; } } else { switch (varDsc->GetDoNotEnregReason()) { case DoNotEnregisterReason::AddrExposed: m_addrExposed++; break; case DoNotEnregisterReason::DontEnregStructs: m_dontEnregStructs++; break; case DoNotEnregisterReason::NotRegSizeStruct: m_notRegSizeStruct++; break; case DoNotEnregisterReason::LocalField: m_localField++; break; case DoNotEnregisterReason::VMNeedsStackAddr: m_VMNeedsStackAddr++; break; case DoNotEnregisterReason::LiveInOutOfHandler: m_liveInOutHndlr++; break; case DoNotEnregisterReason::BlockOp: m_blockOp++; break; case DoNotEnregisterReason::IsStructArg: m_structArg++; break; case DoNotEnregisterReason::DepField: m_depField++; break; case DoNotEnregisterReason::NoRegVars: m_noRegVars++; break; case DoNotEnregisterReason::MinOptsGC: m_minOptsGC++; break; #if !defined(TARGET_64BIT) case DoNotEnregisterReason::LongParamField: m_longParamField++; break; #endif #ifdef JIT32_GCENCODER case DoNotEnregisterReason::PinningRef: m_PinningRef++; break; #endif case DoNotEnregisterReason::LclAddrNode: m_lclAddrNode++; break; case DoNotEnregisterReason::CastTakesAddr: m_castTakesAddr++; break; case DoNotEnregisterReason::StoreBlkSrc: m_storeBlkSrc++; break; case DoNotEnregisterReason::OneAsgRetyping: m_oneAsgRetyping++; break; case DoNotEnregisterReason::SwizzleArg: m_swizzleArg++; break; case DoNotEnregisterReason::BlockOpRet: m_blockOpRet++; break; case DoNotEnregisterReason::ReturnSpCheck: m_returnSpCheck++; break; case DoNotEnregisterReason::SimdUserForcesDep: m_simdUserForcesDep++; break; default: unreached(); break; } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::AddrExposed) { // We can't `assert(IsAddressExposed())` because `fgAdjustForAddressExposedOrWrittenThis` // does not clear `m_doNotEnregReason` on `this`. 
switch (varDsc->GetAddrExposedReason()) { case AddressExposedReason::PARENT_EXPOSED: m_parentExposed++; break; case AddressExposedReason::TOO_CONSERVATIVE: m_tooConservative++; break; case AddressExposedReason::ESCAPE_ADDRESS: m_escapeAddress++; break; case AddressExposedReason::WIDE_INDIR: m_wideIndir++; break; case AddressExposedReason::OSR_EXPOSED: m_osrExposed++; break; case AddressExposedReason::STRESS_LCL_FLD: m_stressLclFld++; break; case AddressExposedReason::COPY_FLD_BY_FLD: m_copyFldByFld++; break; case AddressExposedReason::DISPATCH_RET_BUF: m_dispatchRetBuf++; break; default: unreached(); break; } } } } void Compiler::EnregisterStats::Dump(FILE* fout) const { const unsigned totalNumberOfNotStructVars = s_enregisterStats.m_totalNumberOfVars - s_enregisterStats.m_totalNumberOfStructVars; const unsigned totalNumberOfNotStructEnregVars = s_enregisterStats.m_totalNumberOfEnregVars - s_enregisterStats.m_totalNumberOfStructEnregVars; const unsigned notEnreg = s_enregisterStats.m_totalNumberOfVars - s_enregisterStats.m_totalNumberOfEnregVars; fprintf(fout, "\nLocals enregistration statistics:\n"); if (m_totalNumberOfVars == 0) { fprintf(fout, "No locals to report.\n"); return; } fprintf(fout, "total number of locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", m_totalNumberOfVars, m_totalNumberOfEnregVars, m_totalNumberOfVars - m_totalNumberOfEnregVars, (float)m_totalNumberOfEnregVars / m_totalNumberOfVars); if (m_totalNumberOfStructVars != 0) { fprintf(fout, "total number of struct locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", m_totalNumberOfStructVars, m_totalNumberOfStructEnregVars, m_totalNumberOfStructVars - m_totalNumberOfStructEnregVars, (float)m_totalNumberOfStructEnregVars / m_totalNumberOfStructVars); } const unsigned numberOfPrimitiveLocals = totalNumberOfNotStructVars - totalNumberOfNotStructEnregVars; if (numberOfPrimitiveLocals != 0) { fprintf(fout, "total number of primitive locals: %d, number of enregistered: %d, notEnreg: %d, ratio: %.2f\n", totalNumberOfNotStructVars, totalNumberOfNotStructEnregVars, numberOfPrimitiveLocals, (float)totalNumberOfNotStructEnregVars / totalNumberOfNotStructVars); } if (notEnreg == 0) { fprintf(fout, "All locals are enregistered.\n"); return; } #define PRINT_STATS(stat, total) \ if (stat != 0) \ { \ fprintf(fout, #stat " %d, ratio: %.2f\n", stat, (float)stat / total); \ } PRINT_STATS(m_addrExposed, notEnreg); PRINT_STATS(m_dontEnregStructs, notEnreg); PRINT_STATS(m_notRegSizeStruct, notEnreg); PRINT_STATS(m_localField, notEnreg); PRINT_STATS(m_VMNeedsStackAddr, notEnreg); PRINT_STATS(m_liveInOutHndlr, notEnreg); PRINT_STATS(m_blockOp, notEnreg); PRINT_STATS(m_structArg, notEnreg); PRINT_STATS(m_depField, notEnreg); PRINT_STATS(m_noRegVars, notEnreg); PRINT_STATS(m_minOptsGC, notEnreg); #if !defined(TARGET_64BIT) PRINT_STATS(m_longParamField, notEnreg); #endif // !TARGET_64BIT #ifdef JIT32_GCENCODER PRINT_STATS(m_PinningRef, notEnreg); #endif // JIT32_GCENCODER PRINT_STATS(m_lclAddrNode, notEnreg); PRINT_STATS(m_castTakesAddr, notEnreg); PRINT_STATS(m_storeBlkSrc, notEnreg); PRINT_STATS(m_oneAsgRetyping, notEnreg); PRINT_STATS(m_swizzleArg, notEnreg); PRINT_STATS(m_blockOpRet, notEnreg); PRINT_STATS(m_returnSpCheck, notEnreg); PRINT_STATS(m_simdUserForcesDep, notEnreg); fprintf(fout, "\nAddr exposed details:\n"); if (m_addrExposed == 0) { fprintf(fout, "\nNo address exposed locals to report.\n"); return; } PRINT_STATS(m_parentExposed, m_addrExposed); PRINT_STATS(m_tooConservative, m_addrExposed); 
PRINT_STATS(m_escapeAddress, m_addrExposed); PRINT_STATS(m_wideIndir, m_addrExposed); PRINT_STATS(m_osrExposed, m_addrExposed); PRINT_STATS(m_stressLclFld, m_addrExposed); PRINT_STATS(m_copyFldByFld, m_addrExposed); PRINT_STATS(m_dispatchRetBuf, m_addrExposed); } #endif // TRACK_ENREG_STATS
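
#ifdef DEBUG
// Illustrative sketch only -- not part of the JIT sources. It shows how the "find" helpers and
// the dump helpers above compose, the way one might combine them from a debugger. The wrapper
// name (dDumpTreeById) and its diagnostic text are inventions for illustration; everything it
// calls (dFindTree, dbTreeBlock, cBlock, cTree, JitTls::GetCompiler) is defined earlier in this
// file.
void dDumpTreeById(unsigned id)
{
    GenTree* tree = dFindTree(id); // also sets dbTree/dbTreeBlock as a side effect
    if (tree == nullptr)
    {
        printf("tree id %u not found\n", id);
        return;
    }
    cBlock(JitTls::GetCompiler(), dbTreeBlock); // first show the block containing the tree
    cTree(JitTls::GetCompiler(), tree);         // then dump the tree itself
}
#endif // DEBUG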
1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/jit/compiler.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XX Represents the method data we are currently JIT-compiling. XX XX An instance of this class is created for every method we JIT. XX XX This contains all the info needed for the method. So allocating a XX XX a new instance per method makes it thread-safe. XX XX It should be used to do all the memory management for the compiler run. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _COMPILER_H_ #define _COMPILER_H_ /*****************************************************************************/ #include "jit.h" #include "opcode.h" #include "varset.h" #include "jitstd.h" #include "jithashtable.h" #include "gentree.h" #include "debuginfo.h" #include "lir.h" #include "block.h" #include "inline.h" #include "jiteh.h" #include "instr.h" #include "regalloc.h" #include "sm.h" #include "cycletimer.h" #include "blockset.h" #include "arraystack.h" #include "hashbv.h" #include "jitexpandarray.h" #include "tinyarray.h" #include "valuenum.h" #include "jittelemetry.h" #include "namedintrinsiclist.h" #ifdef LATE_DISASM #include "disasm.h" #endif #include "codegeninterface.h" #include "regset.h" #include "jitgcinfo.h" #if DUMP_GC_TABLES && defined(JIT32_GCENCODER) #include "gcdump.h" #endif #include "emit.h" #include "hwintrinsic.h" #include "simd.h" #include "simdashwintrinsic.h" // This is only used locally in the JIT to indicate that // a verification block should be inserted #define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER /***************************************************************************** * Forward declarations */ struct InfoHdr; // defined in GCInfo.h struct escapeMapping_t; // defined in fgdiagnostic.cpp class emitter; // defined in emit.h struct ShadowParamVarInfo; // defined in GSChecks.cpp struct InitVarDscInfo; // defined in register_arg_convention.h class FgStack; // defined in fgbasic.cpp class Instrumentor; // defined in fgprofile.cpp class SpanningTreeVisitor; // defined in fgprofile.cpp class CSE_DataFlow; // defined in OptCSE.cpp class OptBoolsDsc; // defined in optimizer.cpp #ifdef DEBUG struct IndentStack; #endif class Lowering; // defined in lower.h // The following are defined in this file, Compiler.h class Compiler; /***************************************************************************** * Unwind info */ #include "unwind.h" /*****************************************************************************/ // // Declare global operator new overloads that use the compiler's arena allocator // // I wanted to make the second argument optional, with default = CMK_Unknown, but that // caused these to be ambiguous with the global placement new operators. void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference); // Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions. 
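// Illustrative usage of the overloads above (a sketch, not code from this header): given a
// Compiler* "comp", allocations are placed in the compiler's arena rather than on the process
// heap, e.g.
//
//     ExampleNode* n = new (comp, CMK_Unknown) ExampleNode(arg); // "ExampleNode"/"arg" are
//                                                                // hypothetical names
//
// Such memory is owned by the arena allocator and is released together with the rest of the
// compiler's allocations, so there is typically no per-object delete.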
#include "loopcloning.h" /*****************************************************************************/ /* This is included here and not earlier as it needs the definition of "CSE" * which is defined in the section above */ /*****************************************************************************/ unsigned genLog2(unsigned value); unsigned genLog2(unsigned __int64 value); unsigned ReinterpretHexAsDecimal(unsigned in); /*****************************************************************************/ const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC); #ifdef DEBUG const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs #endif //------------------------------------------------------------------------ // HFA info shared by LclVarDsc and fgArgTabEntry //------------------------------------------------------------------------ inline bool IsHfa(CorInfoHFAElemType kind) { return kind != CORINFO_HFA_ELEM_NONE; } inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind) { switch (kind) { case CORINFO_HFA_ELEM_FLOAT: return TYP_FLOAT; case CORINFO_HFA_ELEM_DOUBLE: return TYP_DOUBLE; #ifdef FEATURE_SIMD case CORINFO_HFA_ELEM_VECTOR64: return TYP_SIMD8; case CORINFO_HFA_ELEM_VECTOR128: return TYP_SIMD16; #endif case CORINFO_HFA_ELEM_NONE: return TYP_UNDEF; default: assert(!"Invalid HfaElemKind"); return TYP_UNDEF; } } inline CorInfoHFAElemType HfaElemKindFromType(var_types type) { switch (type) { case TYP_FLOAT: return CORINFO_HFA_ELEM_FLOAT; case TYP_DOUBLE: return CORINFO_HFA_ELEM_DOUBLE; #ifdef FEATURE_SIMD case TYP_SIMD8: return CORINFO_HFA_ELEM_VECTOR64; case TYP_SIMD16: return CORINFO_HFA_ELEM_VECTOR128; #endif case TYP_UNDEF: return CORINFO_HFA_ELEM_NONE; default: assert(!"Invalid HFA Type"); return CORINFO_HFA_ELEM_NONE; } } // The following holds the Local var info (scope information) typedef const char* VarName; // Actual ASCII string struct VarScopeDsc { unsigned vsdVarNum; // (remapped) LclVarDsc number unsigned vsdLVnum; // 'which' in eeGetLVinfo(). // Also, it is the index of this entry in the info.compVarScopes array, // which is useful since the array is also accessed via the // compEnterScopeList and compExitScopeList sorted arrays. IL_OFFSET vsdLifeBeg; // instr offset of beg of life IL_OFFSET vsdLifeEnd; // instr offset of end of life #ifdef DEBUG VarName vsdName; // name of the var #endif }; // This class stores information associated with a LclVar SSA definition. class LclSsaVarDsc { // The basic block where the definition occurs. Definitions of uninitialized variables // are considered to occur at the start of the first basic block (fgFirstBB). // // TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by // SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to // investigate and perhaps eliminate this rather unexpected behavior. BasicBlock* m_block; // The GT_ASG node that generates the definition, or nullptr for definitions // of uninitialized variables. 
GenTreeOp* m_asg; public: LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr) { } LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); } BasicBlock* GetBlock() const { return m_block; } void SetBlock(BasicBlock* block) { m_block = block; } GenTreeOp* GetAssignment() const { return m_asg; } void SetAssignment(GenTreeOp* asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); m_asg = asg; } ValueNumPair m_vnPair; }; // This class stores information associated with a memory SSA definition. class SsaMemDef { public: ValueNumPair m_vnPair; }; //------------------------------------------------------------------------ // SsaDefArray: A resizable array of SSA definitions. // // Unlike an ordinary resizable array implementation, this allows only element // addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM // (basically it's a 1-based array). The array doesn't impose any particular // requirements on the elements it stores and AllocSsaNum forwards its arguments // to the array element constructor, this way the array supports both LclSsaVarDsc // and SsaMemDef elements. // template <typename T> class SsaDefArray { T* m_array; unsigned m_arraySize; unsigned m_count; static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0); static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1); // Get the minimum valid SSA number. unsigned GetMinSsaNum() const { return SsaConfig::FIRST_SSA_NUM; } // Increase (double) the size of the array. void GrowArray(CompAllocator alloc) { unsigned oldSize = m_arraySize; unsigned newSize = max(2, oldSize * 2); T* newArray = alloc.allocate<T>(newSize); for (unsigned i = 0; i < oldSize; i++) { newArray[i] = m_array[i]; } m_array = newArray; m_arraySize = newSize; } public: // Construct an empty SsaDefArray. SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0) { } // Reset the array (used only if the SSA form is reconstructed). void Reset() { m_count = 0; } // Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM). template <class... Args> unsigned AllocSsaNum(CompAllocator alloc, Args&&... args) { if (m_count == m_arraySize) { GrowArray(alloc); } unsigned ssaNum = GetMinSsaNum() + m_count; m_array[m_count++] = T(std::forward<Args>(args)...); // Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1)); return ssaNum; } // Get the number of SSA definitions in the array. unsigned GetCount() const { return m_count; } // Get a pointer to the SSA definition at the specified index. T* GetSsaDefByIndex(unsigned index) { assert(index < m_count); return &m_array[index]; } // Check if the specified SSA number is valid. bool IsValidSsaNum(unsigned ssaNum) const { return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count)); } // Get a pointer to the SSA definition associated with the specified SSA number. T* GetSsaDef(unsigned ssaNum) { assert(ssaNum != SsaConfig::RESERVED_SSA_NUM); return GetSsaDefByIndex(ssaNum - GetMinSsaNum()); } // Get an SSA number associated with the specified SSA def (that must be in this array). 
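    // Illustrative usage (a sketch; "defs" and "alloc" are hypothetical names for an
    // SsaDefArray<LclSsaVarDsc> and a CompAllocator): definitions are only ever appended, and
    // numbering starts at SsaConfig::FIRST_SSA_NUM, e.g.
    //
    //     unsigned ssaNum = defs.AllocSsaNum(alloc);  // first call returns FIRST_SSA_NUM
    //     LclSsaVarDsc* def = defs.GetSsaDef(ssaNum); // map the number back to the definition
    //
    // The reverse mapping, from a definition pointer back to its SSA number, is the job of
    // GetSsaNum below.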
unsigned GetSsaNum(T* ssaDef) { assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count])); return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]); } }; enum RefCountState { RCS_INVALID, // not valid to get/set ref counts RCS_EARLY, // early counts for struct promotion and struct passing RCS_NORMAL, // normal ref counts (from lvaMarkRefs onward) }; #ifdef DEBUG // Reasons why we can't enregister a local. enum class DoNotEnregisterReason { None, AddrExposed, // the address of this local is exposed. DontEnregStructs, // struct enregistration is disabled. NotRegSizeStruct, // the struct size does not much any register size, usually the struct size is too big. LocalField, // the local is accessed with LCL_FLD, note we can do it not only for struct locals. VMNeedsStackAddr, LiveInOutOfHandler, // the local is alive in and out of exception handler and not signle def. BlockOp, // Is read or written via a block operation. IsStructArg, // Is a struct passed as an argument in a way that requires a stack location. DepField, // It is a field of a dependently promoted struct NoRegVars, // opts.compFlags & CLFLG_REGVAR is not set MinOptsGC, // It is a GC Ref and we are compiling MinOpts #if !defined(TARGET_64BIT) LongParamField, // It is a decomposed field of a long parameter. #endif #ifdef JIT32_GCENCODER PinningRef, #endif LclAddrNode, // the local is accessed with LCL_ADDR_VAR/FLD. CastTakesAddr, StoreBlkSrc, // the local is used as STORE_BLK source. OneAsgRetyping, // fgMorphOneAsgBlockOp prevents this local from being enregister. SwizzleArg, // the local is passed using LCL_FLD as another type. BlockOpRet, // the struct is returned and it promoted or there is a cast. ReturnSpCheck, // the local is used to do SP check SimdUserForcesDep // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted }; enum class AddressExposedReason { NONE, PARENT_EXPOSED, // This is a promoted field but the parent is exposed. TOO_CONSERVATIVE, // Were marked as exposed to be conservative, fix these places. ESCAPE_ADDRESS, // The address is escaping, for example, passed as call argument. WIDE_INDIR, // We access via indirection with wider type. OSR_EXPOSED, // It was exposed in the original method, osr has to repeat it. STRESS_LCL_FLD, // Stress mode replaces localVar with localFld and makes them addrExposed. COPY_FLD_BY_FLD, // Field by field copy takes the address of the local, can be fixed. DISPATCH_RET_BUF // Caller return buffer dispatch. }; #endif // DEBUG class LclVarDsc { public: // The constructor. Most things can just be zero'ed. // // Initialize the ArgRegs to REG_STK. // Morph will update if this local is passed in a register. LclVarDsc() : _lvArgReg(REG_STK) , #if FEATURE_MULTIREG_ARGS _lvOtherArgReg(REG_STK) , #endif // FEATURE_MULTIREG_ARGS lvClassHnd(NO_CLASS_HANDLE) , lvRefBlks(BlockSetOps::UninitVal()) , lvPerSsaData() { } // note this only packs because var_types is a typedef of unsigned char var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF unsigned char lvIsParam : 1; // is this a parameter? unsigned char lvIsRegArg : 1; // is this an argument that was passed by register? unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP) unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the // variable is in the same register for the entire function. 
unsigned char lvTracked : 1; // is this a tracked variable? bool lvTrackedNonStruct() { return lvTracked && lvType != TYP_STRUCT; } unsigned char lvPinned : 1; // is this a pinned variable? unsigned char lvMustInit : 1; // must be initialized private: bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a // global location, etc. // We cannot reason reliably about the value of the variable. public: unsigned char lvDoNotEnregister : 1; // Do not enregister this variable. unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects // struct promotion. unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must // be on the stack (at least at those boundaries.) unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder) unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable. unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local. unsigned char lvStackByref : 1; // This is a compiler temporary of TYP_BYREF that is known to point into our local // stack frame. unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local unsigned char lvIsTemp : 1; // Short-lifetime compiler temp #if defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref. #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsBoolean : 1; // set if variable is boolean unsigned char lvSingleDef : 1; // variable has a single def // before lvaMarkLocalVars: identifies ref type locals that can get type updates // after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate // Currently, this is only used to decide if an EH variable can be // a register candiate or not. unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variable that are disqualified from register // candidancy unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan) // and is spilled making it candidate to spill right after the // first (and only) definition. // Note: We cannot reuse lvSingleDefRegCandidate because it is set // in earlier phase and the information might not be appropriate // in LSRA. unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization unsigned char lvVolatileHint : 1; // hint for AssertionProp #ifndef TARGET_64BIT unsigned char lvStructDoubleAlign : 1; // Must we double align this struct? #endif // !TARGET_64BIT #ifdef TARGET_64BIT unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long #endif #ifdef DEBUG unsigned char lvKeepType : 1; // Don't change the type of this variable unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one #endif unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security // checks) unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks? unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a // 32-bit target. 
For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether // references to the arg are being rewritten as references to a promoted shadow local. unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local? unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout" unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif // FEATURE_HFA_FIELDS_PRESENT #ifdef DEBUG // TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct // types, and is needed because of cases where TYP_STRUCT is bashed to an integral type. // Consider cleaning this up so this workaround is not required. unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals. // I.e. there is no longer any reference to the struct directly. // In this case we can simply remove this struct local. unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no // reference to the fields of this struct. #endif unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes #ifdef FEATURE_SIMD // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the // type of an arg node is TYP_BYREF and a local node is TYP_SIMD*. unsigned char lvSIMDType : 1; // This is a SIMD struct unsigned char lvUsedInSIMDIntrinsic : 1; // This tells lclvar is used for simd intrinsic unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has less than 32 entries CorInfoType GetSimdBaseJitType() const { return (CorInfoType)lvSimdBaseJitType; } void SetSimdBaseJitType(CorInfoType simdBaseJitType) { assert(simdBaseJitType < (1 << 5)); lvSimdBaseJitType = (unsigned char)simdBaseJitType; } var_types GetSimdBaseType() const; #endif // FEATURE_SIMD unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct. unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type #ifdef DEBUG unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness #endif unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc, // eh) unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in // the prolog. If the local has gc pointers, there are no gc-safe points // between the prolog and the explicit initialization. union { unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct // local. For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the // struct local created to model the parameter's struct promotion, if any. 
unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local). // Valid on promoted struct local fields. }; unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc. unsigned char lvFldOffset; unsigned char lvFldOrdinal; #ifdef DEBUG unsigned char lvSingleDefDisqualifyReason = 'H'; #endif #if FEATURE_MULTIREG_ARGS regNumber lvRegNumForSlot(unsigned slotNum) { if (slotNum == 0) { return (regNumber)_lvArgReg; } else if (slotNum == 1) { return GetOtherArgReg(); } else { assert(false && "Invalid slotNum!"); } unreached(); } #endif // FEATURE_MULTIREG_ARGS CorInfoHFAElemType GetLvHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _lvHfaElemKind; #else NOWAY_MSG("GetLvHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif // FEATURE_HFA_FIELDS_PRESENT } void SetLvHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _lvHfaElemKind = elemKind; #else NOWAY_MSG("SetLvHfaElemKind"); #endif // FEATURE_HFA_FIELDS_PRESENT } bool lvIsHfa() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetLvHfaElemKind()); } else { return false; } } bool lvIsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return lvIsRegArg && lvIsHfa(); } else { return false; } } //------------------------------------------------------------------------------ // lvHfaSlots: Get the number of slots used by an HFA local // // Return Value: // On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA // On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8 // unsigned lvHfaSlots() const { assert(lvIsHfa()); assert(varTypeIsStruct(lvType)); unsigned slots = 0; #ifdef TARGET_ARM slots = lvExactSize / sizeof(float); assert(slots <= 8); #elif defined(TARGET_ARM64) switch (GetLvHfaElemKind()) { case CORINFO_HFA_ELEM_NONE: assert(!"lvHfaSlots called for non-HFA"); break; case CORINFO_HFA_ELEM_FLOAT: assert((lvExactSize % 4) == 0); slots = lvExactSize >> 2; break; case CORINFO_HFA_ELEM_DOUBLE: case CORINFO_HFA_ELEM_VECTOR64: assert((lvExactSize % 8) == 0); slots = lvExactSize >> 3; break; case CORINFO_HFA_ELEM_VECTOR128: assert((lvExactSize % 16) == 0); slots = lvExactSize >> 4; break; default: unreached(); } assert(slots <= 4); #endif // TARGET_ARM64 return slots; } // lvIsMultiRegArgOrRet() // returns true if this is a multireg LclVar struct used in an argument context // or if this is a multireg LclVar struct assigned from a multireg call bool lvIsMultiRegArgOrRet() { return lvIsMultiRegArg || lvIsMultiRegRet; } #if defined(DEBUG) private: DoNotEnregisterReason m_doNotEnregReason; AddressExposedReason m_addrExposedReason; public: void SetDoNotEnregReason(DoNotEnregisterReason reason) { m_doNotEnregReason = reason; } DoNotEnregisterReason GetDoNotEnregReason() const { return m_doNotEnregReason; } AddressExposedReason GetAddrExposedReason() const { return m_addrExposedReason; } #endif // DEBUG public: void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason)) { m_addrExposed = value; INDEBUG(m_addrExposedReason = reason); } void CleanAddressExposed() { m_addrExposed = false; } bool IsAddressExposed() const { return m_addrExposed; } private: regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a // register pair). It is set during codegen any time the // variable is enregistered (lvRegister is only set // to non-zero if the variable gets the same register assignment for its entire // lifetime). 
#if !defined(TARGET_64BIT) regNumberSmall _lvOtherReg; // Used for "upper half" of long var. #endif // !defined(TARGET_64BIT) regNumberSmall _lvArgReg; // The (first) register in which this argument is passed. #if FEATURE_MULTIREG_ARGS regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register. // Note this is defined but not used by ARM32 #endif // FEATURE_MULTIREG_ARGS regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry public: // The register number is stored in a small format (8 bits), but the getters return and the setters take // a full-size (unsigned) format, to localize the casts here. ///////////////////// regNumber GetRegNum() const { return (regNumber)_lvRegNum; } void SetRegNum(regNumber reg) { _lvRegNum = (regNumberSmall)reg; assert(_lvRegNum == reg); } ///////////////////// #if defined(TARGET_64BIT) regNumber GetOtherReg() const { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings return REG_NA; } void SetOtherReg(regNumber reg) { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings } #else // !TARGET_64BIT regNumber GetOtherReg() const { return (regNumber)_lvOtherReg; } void SetOtherReg(regNumber reg) { _lvOtherReg = (regNumberSmall)reg; assert(_lvOtherReg == reg); } #endif // !TARGET_64BIT ///////////////////// regNumber GetArgReg() const { return (regNumber)_lvArgReg; } void SetArgReg(regNumber reg) { _lvArgReg = (regNumberSmall)reg; assert(_lvArgReg == reg); } #if FEATURE_MULTIREG_ARGS regNumber GetOtherArgReg() const { return (regNumber)_lvOtherArgReg; } void SetOtherArgReg(regNumber reg) { _lvOtherArgReg = (regNumberSmall)reg; assert(_lvOtherArgReg == reg); } #endif // FEATURE_MULTIREG_ARGS #ifdef FEATURE_SIMD // Is this is a SIMD struct? bool lvIsSIMDType() const { return lvSIMDType; } // Is this is a SIMD struct which is used for SIMD intrinsic? bool lvIsUsedInSIMDIntrinsic() const { return lvUsedInSIMDIntrinsic; } #else // If feature_simd not enabled, return false bool lvIsSIMDType() const { return false; } bool lvIsUsedInSIMDIntrinsic() const { return false; } #endif ///////////////////// regNumber GetArgInitReg() const { return (regNumber)_lvArgInitReg; } void SetArgInitReg(regNumber reg) { _lvArgInitReg = (regNumberSmall)reg; assert(_lvArgInitReg == reg); } ///////////////////// bool lvIsRegCandidate() const { return lvLRACandidate != 0; } bool lvIsInReg() const { return lvIsRegCandidate() && (GetRegNum() != REG_STK); } regMaskTP lvRegMask() const { regMaskTP regMask = RBM_NONE; if (varTypeUsesFloatReg(TypeGet())) { if (GetRegNum() != REG_STK) { regMask = genRegMaskFloat(GetRegNum(), TypeGet()); } } else { if (GetRegNum() != REG_STK) { regMask = genRegMask(GetRegNum()); } } return regMask; } unsigned short lvVarIndex; // variable tracking index private: unsigned short m_lvRefCnt; // unweighted (real) reference count. For implicit by reference // parameters, this gets hijacked from fgResetImplicitByRefRefCount // through fgMarkDemotedImplicitByRefArgs, to provide a static // appearance count (computed during address-exposed analysis) // that fgMakeOutgoingStructArgCopy consults during global morph // to determine if eliding its copy is legal. 
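    // Example for m_lvRefCnt above (illustrative): an implicit byref struct parameter that appears once in the
    // method's IL reports an appearance count of 1 here during global morph, which fgMakeOutgoingStructArgCopy
    // consults when deciding whether its caller-side copy can be elided.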
weight_t m_lvRefCntWtd; // weighted reference count public: unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const; void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL); void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL); weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const; void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL); void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL); private: int lvStkOffs; // stack offset of home in bytes. public: int GetStackOffset() const { return lvStkOffs; } void SetStackOffset(int offset) { lvStkOffs = offset; } unsigned lvExactSize; // (exact) size of the type in bytes // Is this a promoted struct? // This method returns true only for structs (including SIMD structs), not for // locals that are split on a 32-bit target. // It is only necessary to use this: // 1) if only structs are wanted, and // 2) if Lowering has already been done. // Otherwise lvPromoted is valid. bool lvPromotedStruct() { #if !defined(TARGET_64BIT) return (lvPromoted && !varTypeIsLong(lvType)); #else // defined(TARGET_64BIT) return lvPromoted; #endif // defined(TARGET_64BIT) } unsigned lvSize() const; size_t lvArgStackSize() const; unsigned lvSlotNum; // original slot # (if remapped) typeInfo lvVerTypeInfo; // type info needed for verification // class handle for the local or null if not known or not a class, // for a struct handle use `GetStructHnd()`. CORINFO_CLASS_HANDLE lvClassHnd; // Get class handle for a struct local or implicitByRef struct local. CORINFO_CLASS_HANDLE GetStructHnd() const { #ifdef FEATURE_SIMD if (lvSIMDType && (m_layout == nullptr)) { return NO_CLASS_HANDLE; } #endif assert(m_layout != nullptr); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF))); #else assert(varTypeIsStruct(TypeGet())); #endif CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle(); assert(structHnd != NO_CLASS_HANDLE); return structHnd; } CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields private: ClassLayout* m_layout; // layout info for structs public: BlockSet lvRefBlks; // Set of blocks that contain refs Statement* lvDefStmt; // Pointer to the statement with the single definition void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies var_types TypeGet() const { return (var_types)lvType; } bool lvStackAligned() const { assert(lvIsStructField); return ((lvFldOffset % TARGET_POINTER_SIZE) == 0); } bool lvNormalizeOnLoad() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. (lvIsParam || m_addrExposed || lvIsStructField); } bool lvNormalizeOnStore() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. !(lvIsParam || m_addrExposed || lvIsStructField); } void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true); var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { assert(lvIsHfa()); return HfaTypeFromElemKind(GetLvHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type) { if (GlobalJitOptions::compFeatureHfa) { CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetLvHfaElemKind(elemKind); // Ensure we've allocated enough bits. 
assert(GetLvHfaElemKind() == elemKind); } } // Returns true if this variable contains GC pointers (including being a GC pointer itself). bool HasGCPtr() const { return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr()); } // Returns the layout of a struct variable. ClassLayout* GetLayout() const { assert(varTypeIsStruct(lvType)); return m_layout; } // Sets the layout of a struct variable. void SetLayout(ClassLayout* layout) { assert(varTypeIsStruct(lvType)); assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout)); m_layout = layout; } SsaDefArray<LclSsaVarDsc> lvPerSsaData; // Returns the address of the per-Ssa data for the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). LclSsaVarDsc* GetPerSsaData(unsigned ssaNum) { return lvPerSsaData.GetSsaDef(ssaNum); } // Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition // of this variable. unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef) { return lvPerSsaData.GetSsaNum(ssaDef); } var_types GetRegisterType(const GenTreeLclVarCommon* tree) const; var_types GetRegisterType() const; var_types GetActualRegisterType() const; bool IsEnregisterableType() const { return GetRegisterType() != TYP_UNDEF; } bool IsEnregisterableLcl() const { if (lvDoNotEnregister) { return false; } return IsEnregisterableType(); } //----------------------------------------------------------------------------- // IsAlwaysAliveInMemory: Determines if this variable's value is always // up-to-date on stack. This is possible if this is an EH-var or // we decided to spill after single-def. // bool IsAlwaysAliveInMemory() const { return lvLiveInOutOfHndlr || lvSpillAtSingleDef; } bool CanBeReplacedWithItsField(Compiler* comp) const; #ifdef DEBUG public: const char* lvReason; void PrintVarReg() const { printf("%s", getRegName(GetRegNum())); } #endif // DEBUG }; // class LclVarDsc enum class SymbolicIntegerValue : int32_t { LongMin, IntMin, ShortMin, ByteMin, Zero, One, ByteMax, UByteMax, ShortMax, UShortMax, IntMax, UIntMax, LongMax, }; inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) > static_cast<int32_t>(right); } inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) >= static_cast<int32_t>(right); } inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) < static_cast<int32_t>(right); } inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) <= static_cast<int32_t>(right); } // Represents an integral range useful for reasoning about integral casts. // It uses a symbolic representation for lower and upper bounds so // that it can efficiently handle integers of all sizes on all hosts. // // Note that the ranges represented by this class are **always** in the // "signed" domain. This is so that if we know the range a node produces, it // can be trivially used to determine if a cast above the node does or does not // overflow, which requires that the interpretation of integers be the same both // for the "input" and "output". We choose signed interpretation here because it // produces nice continuous ranges and because IR uses sign-extension for constants. // // Some examples of how ranges are computed for casts: // 1. 
CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the // same range - all casts that do not change the representation, i. e. have the same // "actual" input and output type, have the same "input" and "output" range. // 2. CAST_OVF(ulong <- uint): never oveflows => the "input" range is [INT_MIN..INT_MAX] // (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32 // bit integers zero-extended to 64 bits). // 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0 // when interpreting as signed => the "input" range is [0..INT_MAX], the same range // being the produced one as the node does not change the width of the integer. // class IntegralRange { private: SymbolicIntegerValue m_lowerBound; SymbolicIntegerValue m_upperBound; public: IntegralRange() = default; IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound) : m_lowerBound(lowerBound), m_upperBound(upperBound) { assert(lowerBound <= upperBound); } bool Contains(int64_t value) const; bool Contains(IntegralRange other) const { return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound); } bool IsPositive() { return m_lowerBound >= SymbolicIntegerValue::Zero; } bool Equals(IntegralRange other) const { return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound); } static int64_t SymbolicToRealValue(SymbolicIntegerValue value); static SymbolicIntegerValue LowerBoundForType(var_types type); static SymbolicIntegerValue UpperBoundForType(var_types type); static IntegralRange ForType(var_types type) { return {LowerBoundForType(type), UpperBoundForType(type)}; } static IntegralRange ForNode(GenTree* node, Compiler* compiler); static IntegralRange ForCastInput(GenTreeCast* cast); static IntegralRange ForCastOutput(GenTreeCast* cast); #ifdef DEBUG static void Print(IntegralRange range); #endif // DEBUG }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX TempsInfo XX XX XX XX The temporary lclVars allocated by the compiler for code generation XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /***************************************************************************** * * The following keeps track of temporaries allocated in the stack frame * during code-generation (after register allocation). These spill-temps are * only used if we run out of registers while evaluating a tree. * * These are different from the more common temps allocated by lvaGrabTemp(). 
*/ class TempDsc { public: TempDsc* tdNext; private: int tdOffs; #ifdef DEBUG static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG #endif // DEBUG int tdNum; BYTE tdSize; var_types tdType; public: TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType) { #ifdef DEBUG // temps must have a negative number (so they have a different number from all local variables) assert(tdNum < 0); tdOffs = BAD_TEMP_OFFSET; #endif // DEBUG if (tdNum != _tdNum) { IMPL_LIMITATION("too many spill temps"); } } #ifdef DEBUG bool tdLegalOffset() const { return tdOffs != BAD_TEMP_OFFSET; } #endif // DEBUG int tdTempOffs() const { assert(tdLegalOffset()); return tdOffs; } void tdSetTempOffs(int offs) { tdOffs = offs; assert(tdLegalOffset()); } void tdAdjustTempOffs(int offs) { tdOffs += offs; assert(tdLegalOffset()); } int tdTempNum() const { assert(tdNum < 0); return tdNum; } unsigned tdTempSize() const { return tdSize; } var_types tdTempType() const { return tdType; } }; // interface to hide linearscan implementation from rest of compiler class LinearScanInterface { public: virtual void doLinearScan() = 0; virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0; virtual bool willEnregisterLocalVars() const = 0; #if TRACK_LSRA_STATS virtual void dumpLsraStatsCsv(FILE* file) = 0; virtual void dumpLsraStatsSummary(FILE* file) = 0; #endif // TRACK_LSRA_STATS }; LinearScanInterface* getLinearScanAllocator(Compiler* comp); // Information about arrays: their element type and size, and the offset of the first element. // We label GT_IND's that are array indices with GTF_IND_ARR_INDEX, and, for such nodes, // associate an array info via the map retrieved by GetArrayInfoMap(). This information is used, // for example, in value numbering of array index expressions. struct ArrayInfo { var_types m_elemType; CORINFO_CLASS_HANDLE m_elemStructType; unsigned m_elemSize; unsigned m_elemOffset; ArrayInfo() : m_elemType(TYP_UNDEF), m_elemStructType(nullptr), m_elemSize(0), m_elemOffset(0) { } ArrayInfo(var_types elemType, unsigned elemSize, unsigned elemOffset, CORINFO_CLASS_HANDLE elemStructType) : m_elemType(elemType), m_elemStructType(elemStructType), m_elemSize(elemSize), m_elemOffset(elemOffset) { } }; // This enumeration names the phases into which we divide compilation. The phases should completely // partition a compilation. enum Phases { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm, #include "compphases.h" PHASE_NUMBER_OF }; extern const char* PhaseNames[]; extern const char* PhaseEnums[]; extern const LPCWSTR PhaseShortNames[]; // Specify which checks should be run after each phase // enum class PhaseChecks { CHECK_NONE, CHECK_ALL }; // Specify compiler data that a phase might modify enum class PhaseStatus : unsigned { MODIFIED_NOTHING, MODIFIED_EVERYTHING }; // The following enum provides a simple 1:1 mapping to CLR API's enum API_ICorJitInfo_Names { #define DEF_CLR_API(name) API_##name, #include "ICorJitInfo_API_names.h" API_COUNT }; //--------------------------------------------------------------- // Compilation time. // // A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods. // We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles // of the compilation, as well as the cycles for each phase. We also track the number of bytecodes. 
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated // by "m_timerFailure" being true. // If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile. struct CompTimeInfo { #ifdef FEATURE_JIT_METHOD_PERF // The string names of the phases. static const char* PhaseNames[]; static bool PhaseHasChildren[]; static int PhaseParent[]; static bool PhaseReportsIRSize[]; unsigned m_byteCodeBytes; unsigned __int64 m_totalCycles; unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF]; #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF]; #endif unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF]; // For better documentation, we call EndPhase on // non-leaf phases. We should also call EndPhase on the // last leaf subphase; obviously, the elapsed cycles between the EndPhase // for the last leaf subphase and the EndPhase for an ancestor should be very small. // We add all such "redundant end phase" intervals to this variable below; we print // it out in a report, so we can verify that it is, indeed, very small. If it ever // isn't, this means that we're doing something significant between the end of the last // declared subphase and the end of its parent. unsigned __int64 m_parentPhaseEndSlop; bool m_timerFailure; #if MEASURE_CLRAPI_CALLS // The following measures the time spent inside each individual CLR API call. unsigned m_allClrAPIcalls; unsigned m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT]; unsigned __int64 m_allClrAPIcycles; unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; #endif // MEASURE_CLRAPI_CALLS CompTimeInfo(unsigned byteCodeBytes); #endif }; #ifdef FEATURE_JIT_METHOD_PERF #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo; #endif // This class summarizes the JIT time information over the course of a run: the number of methods compiled, // and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above). // The operation of adding a single method's timing to the summary may be performed concurrently by several // threads, so it is protected by a lock. // This class is intended to be used as a singleton type, with only a single instance. class CompTimeSummaryInfo { // This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one). static CritSecObject s_compTimeSummaryLock; int m_numMethods; int m_totMethods; CompTimeInfo m_total; CompTimeInfo m_maximum; int m_numFilteredMethods; CompTimeInfo m_filtered; // This can use what ever data you want to determine if the value to be added // belongs in the filtered section (it's always included in the unfiltered section) bool IncludedInFilteredData(CompTimeInfo& info); public: // This is the unique CompTimeSummaryInfo object for this instance of the runtime. static CompTimeSummaryInfo s_compTimeSummary; CompTimeSummaryInfo() : m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0) { } // Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary. // This is thread safe. void AddInfo(CompTimeInfo& info, bool includePhases); // Print the summary information to "f". // This is not thread-safe; assumed to be called by only one thread. 
void Print(FILE* f); }; // A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation, // and when the current phase started. This is intended to be part of a Compilation object. // class JitTimer { unsigned __int64 m_start; // Start of the compilation. unsigned __int64 m_curPhaseStart; // Start of the current phase. #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRcallStart; // Start of the current CLR API call (if any). unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far unsigned __int64 m_CLRcallCycles; // CLR API cycles under current outer so far. int m_CLRcallAPInum; // The enum/index of the current CLR API call (or -1). static double s_cyclesPerSec; // Cached for speedier measurements #endif #ifdef DEBUG Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start). #endif CompTimeInfo m_info; // The CompTimeInfo for this compilation. static CritSecObject s_csvLock; // Lock to protect the time log file. static FILE* s_csvFile; // The time log file handle. void PrintCsvMethodStats(Compiler* comp); private: void* operator new(size_t); void* operator new[](size_t); void operator delete(void*); void operator delete[](void*); public: // Initialized the timer instance JitTimer(unsigned byteCodeSize); static JitTimer* Create(Compiler* comp, unsigned byteCodeSize) { return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize); } static void PrintCsvHeader(); // Ends the current phase (argument is for a redundant check). void EndPhase(Compiler* compiler, Phases phase); #if MEASURE_CLRAPI_CALLS // Start and end a timed CLR API call. void CLRApiCallEnter(unsigned apix); void CLRApiCallLeave(unsigned apix); #endif // MEASURE_CLRAPI_CALLS // Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode, // and adds it to "sum". void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases); // Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets // *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of // "m_info" to true. bool GetThreadCycles(unsigned __int64* cycles) { bool res = CycleTimer::GetThreadCyclesS(cycles); if (!res) { m_info.m_timerFailure = true; } return res; } static void Shutdown(); }; #endif // FEATURE_JIT_METHOD_PERF //------------------- Function/Funclet info ------------------------------- enum FuncKind : BYTE { FUNC_ROOT, // The main/root function (always id==0) FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler) FUNC_FILTER, // a funclet associated with an EH filter FUNC_COUNT }; class emitLocation; struct FuncInfoDsc { FuncKind funKind; BYTE funFlags; // Currently unused, just here for padding unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this // funclet. It is only valid if funKind field indicates this is a // EH-related funclet: FUNC_HANDLER or FUNC_FILTER #if defined(TARGET_AMD64) // TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; UNWIND_INFO unwindHeader; // Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd // number of codes, the VM or Zapper will 4-byte align the whole thing. 
BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))]; unsigned unwindCodeSlot; #elif defined(TARGET_X86) emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #elif defined(TARGET_ARMARCH) UnwindInfo uwi; // Unwind information for this function/funclet's hot section UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section // Note: we only have a pointer here instead of the actual object, // to save memory in the JIT case (compared to the NGEN case), // where we don't have any cold section. // Note 2: we currently don't support hot/cold splitting in functions // with EH, so uwiCold will be NULL for all funclets. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; #endif // TARGET_ARMARCH #if defined(FEATURE_CFI_SUPPORT) jitstd::vector<CFI_CODE>* cfiCodes; #endif // FEATURE_CFI_SUPPORT // Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else // that isn't shared between the main function body and funclets. }; struct fgArgTabEntry { GenTreeCall::Use* use; // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg. GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any. // Get the node that coresponds to this argument entry. // This is the "real" node and not a placeholder or setup node. GenTree* GetNode() const { return lateUse == nullptr ? use->GetNode() : lateUse->GetNode(); } unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL private: regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for // arguments passed on the stack public: unsigned numRegs; // Count of number of registers that this argument uses. // Note that on ARM, if we have a double hfa, this reflects the number // of DOUBLE registers. #if defined(UNIX_AMD64_ABI) // Unix amd64 will split floating point types and integer types in structs // between floating point and general purpose registers. Keep track of that // information so we do not need to recompute it later. unsigned structIntRegs; unsigned structFloatRegs; #endif // UNIX_AMD64_ABI #if defined(DEBUG_ARG_SLOTS) // These fields were used to calculate stack size in stack slots for arguments // but now they are replaced by precise `m_byteOffset/m_byteSize` because of // arm64 apple abi requirements. // A slot is a pointer sized region in the OutArg area. unsigned slotNum; // When an argument is passed in the OutArg area this is the slot number in the OutArg area unsigned numSlots; // Count of number of slots that this argument uses #endif // DEBUG_ARG_SLOTS // Return number of stack slots that this argument is taking. // TODO-Cleanup: this function does not align with arm64 apple model, // delete it. In most cases we just want to know if we it is using stack or not // but in some cases we are checking if it is a multireg arg, like: // `numRegs + GetStackSlotsNumber() > 1` that is harder to replace. // unsigned GetStackSlotsNumber() const { return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; } private: unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg. 
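    // Example (illustrative, arm64): a 16-byte struct argument passed in two consecutive integer registers
    // starting at R2 is described by an entry with numRegs == 2, GetRegNum(0) == REG_R2,
    // GetRegNum(1) == REG_R3, isPassedInRegisters() == true and GetStackByteSize() == 0; if the argument
    // is not a late arg, _lateArgInx is UINT_MAX and isLateArg() returns false.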
public: unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a // struct is passed as a scalar type, this is that type. // Note that if a struct is passed by reference, this will still be the struct type. bool needTmp : 1; // True when we force this argument's evaluation into a temp LclVar bool needPlace : 1; // True when we must replace this argument with a placeholder node bool isTmp : 1; // True when we setup a temp LclVar for this argument due to size issues with the struct bool processed : 1; // True when we have decided the evaluation order for this argument in the gtCallLateArgs bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of // previous arguments. NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced // to be in certain registers or on the stack, regardless of where they // appear in the arg list. bool isStruct : 1; // True if this is a struct arg bool _isVararg : 1; // True if the argument is in a vararg context. bool passedByRef : 1; // True iff the argument is passed by reference. #if FEATURE_ARG_SPLIT bool _isSplit : 1; // True when this argument is split between the registers and OutArg area #endif // FEATURE_ARG_SPLIT #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif CorInfoHFAElemType GetHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _hfaElemKind; #else NOWAY_MSG("GetHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif } void SetHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _hfaElemKind = elemKind; #else NOWAY_MSG("SetHfaElemKind"); #endif } bool isNonStandard() const { return nonStandardArgKind != NonStandardArgKind::None; } // Returns true if the IR node for this non-standarg arg is added by fgInitArgInfo. // In this case, it must be removed by GenTreeCall::ResetArgInfo. 
bool isNonStandardArgAddedLate() const { switch (static_cast<NonStandardArgKind>(nonStandardArgKind)) { case NonStandardArgKind::None: case NonStandardArgKind::PInvokeFrame: case NonStandardArgKind::ShiftLow: case NonStandardArgKind::ShiftHigh: case NonStandardArgKind::FixedRetBuffer: case NonStandardArgKind::ValidateIndirectCallTarget: return false; case NonStandardArgKind::WrapperDelegateCell: case NonStandardArgKind::VirtualStubCell: case NonStandardArgKind::PInvokeCookie: case NonStandardArgKind::PInvokeTarget: case NonStandardArgKind::R2RIndirectionCell: return true; default: unreached(); } } bool isLateArg() const { bool isLate = (_lateArgInx != UINT_MAX); return isLate; } unsigned GetLateArgInx() const { assert(isLateArg()); return _lateArgInx; } void SetLateArgInx(unsigned inx) { _lateArgInx = inx; } regNumber GetRegNum() const { return (regNumber)regNums[0]; } regNumber GetOtherRegNum() const { return (regNumber)regNums[1]; } #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif void setRegNum(unsigned int i, regNumber regNum) { assert(i < MAX_ARG_REG_COUNT); regNums[i] = (regNumberSmall)regNum; } regNumber GetRegNum(unsigned int i) { assert(i < MAX_ARG_REG_COUNT); return (regNumber)regNums[i]; } bool IsSplit() const { #if FEATURE_ARG_SPLIT return compFeatureArgSplit() && _isSplit; #else // FEATURE_ARG_SPLIT return false; #endif } void SetSplit(bool value) { #if FEATURE_ARG_SPLIT _isSplit = value; #endif } bool IsVararg() const { return compFeatureVarArg() && _isVararg; } void SetIsVararg(bool value) { if (compFeatureVarArg()) { _isVararg = value; } } bool IsHfaArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()); } else { return false; } } bool IsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()) && isPassedInRegisters(); } else { return false; } } unsigned intRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structIntRegs; } #endif // defined(UNIX_AMD64_ABI) if (!this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } unsigned floatRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structFloatRegs; } #endif // defined(UNIX_AMD64_ABI) if (this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } // Get the number of bytes that this argument is occupying on the stack, // including padding up to the target pointer size for platforms // where a stack argument can't take less. unsigned GetStackByteSize() const { if (!IsSplit() && numRegs > 0) { return 0; } assert(!IsHfaArg() || !IsSplit()); assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs); const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs; return stackByteSize; } var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { return HfaTypeFromElemKind(GetHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type, unsigned hfaSlots) { if (GlobalJitOptions::compFeatureHfa) { if (type != TYP_UNDEF) { // We must already have set the passing mode. assert(numRegs != 0 || GetStackByteSize() != 0); // We originally set numRegs according to the size of the struct, but if the size of the // hfaType is not the same as the pointer size, we need to correct it. // Note that hfaSlots is the number of registers we will use. For ARM, that is twice // the number of "double registers". 
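                // Illustrative numbers: an HFA of four floats has hfaSlots == 4 on both arm and arm64; an
                // HFA of four doubles has hfaSlots == 8 on arm (single-FP slots) and 4 on arm64, so on arm
                // numHfaRegs below becomes 8 / 2 == 4 double registers.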
unsigned numHfaRegs = hfaSlots; #ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Must be an even number of registers. assert((numRegs & 1) == 0); numHfaRegs = hfaSlots / 2; } #endif // TARGET_ARM if (!IsHfaArg()) { // We haven't previously set this; do so now. CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetHfaElemKind(elemKind); // Ensure we've allocated enough bits. assert(GetHfaElemKind() == elemKind); if (isPassedInRegisters()) { numRegs = numHfaRegs; } } else { // We've already set this; ensure that it's consistent. if (isPassedInRegisters()) { assert(numRegs == numHfaRegs); } assert(type == HfaTypeFromElemKind(GetHfaElemKind())); } } } } #ifdef TARGET_ARM void SetIsBackFilled(bool backFilled) { isBackFilled = backFilled; } bool IsBackFilled() const { return isBackFilled; } #else // !TARGET_ARM void SetIsBackFilled(bool backFilled) { } bool IsBackFilled() const { return false; } #endif // !TARGET_ARM bool isPassedInRegisters() const { return !IsSplit() && (numRegs != 0); } bool isPassedInFloatRegisters() const { #ifdef TARGET_X86 return false; #else return isValidFloatArgReg(GetRegNum()); #endif } // Can we replace the struct type of this node with a primitive type for argument passing? bool TryPassAsPrimitive() const { return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE)); } #if defined(DEBUG_ARG_SLOTS) // Returns the number of "slots" used, where for this purpose a // register counts as a slot. unsigned getSlotCount() const { if (isBackFilled) { assert(isPassedInRegisters()); assert(numRegs == 1); } else if (GetRegNum() == REG_STK) { assert(!isPassedInRegisters()); assert(numRegs == 0); } else { assert(numRegs > 0); } return numSlots + numRegs; } #endif #if defined(DEBUG_ARG_SLOTS) // Returns the size as a multiple of pointer-size. // For targets without HFAs, this is the same as getSlotCount(). unsigned getSize() const { unsigned size = getSlotCount(); if (GlobalJitOptions::compFeatureHfa) { if (IsHfaRegArg()) { #ifdef TARGET_ARM // We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size. if (GetHfaType() == TYP_DOUBLE) { assert(!IsSplit()); size <<= 1; } #elif defined(TARGET_ARM64) // We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size, // or if they are SIMD16 vector hfa regs we have to double the size. if (GetHfaType() == TYP_FLOAT) { // Round up in case of odd HFA count. size = (size + 1) >> 1; } #ifdef FEATURE_SIMD else if (GetHfaType() == TYP_SIMD16) { size <<= 1; } #endif // FEATURE_SIMD #endif // TARGET_ARM64 } } return size; } #endif // DEBUG_ARG_SLOTS private: unsigned m_byteOffset; // byte size that this argument takes including the padding after. // For example, 1-byte arg on x64 with 8-byte alignment // will have `m_byteSize == 8`, the same arg on apple arm64 will have `m_byteSize == 1`. unsigned m_byteSize; unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers). public: void SetByteOffset(unsigned byteOffset) { DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum); m_byteOffset = byteOffset; } unsigned GetByteOffset() const { DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum); return m_byteOffset; } void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa) { unsigned roundedByteSize; if (compMacOsArm64Abi()) { // Only struct types need extension or rounding to pointer size, but HFA<float> does not. 
if (isStruct && !isFloatHfa) { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } else { roundedByteSize = byteSize; } } else { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } #if !defined(TARGET_ARM) // Arm32 could have a struct with 8 byte alignment // which rounded size % 8 is not 0. assert(m_byteAlignment != 0); assert(roundedByteSize % m_byteAlignment == 0); #endif // TARGET_ARM #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi() && !isStruct) { assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE); } #endif m_byteSize = roundedByteSize; } unsigned GetByteSize() const { return m_byteSize; } void SetByteAlignment(unsigned byteAlignment) { m_byteAlignment = byteAlignment; } unsigned GetByteAlignment() const { return m_byteAlignment; } // Set the register numbers for a multireg argument. // There's nothing to do on x64/Ux because the structDesc has already been used to set the // register numbers. void SetMultiRegNums() { #if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) if (numRegs == 1) { return; } regNumber argReg = GetRegNum(0); #ifdef TARGET_ARM unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1; #else unsigned int regSize = 1; #endif if (numRegs > MAX_ARG_REG_COUNT) NO_WAY("Multireg argument exceeds the maximum length"); for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++) { argReg = (regNumber)(argReg + regSize); setRegNum(regIndex, argReg); } #endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) } #ifdef DEBUG // Check that the value of 'isStruct' is consistent. // A struct arg must be one of the following: // - A node of struct type, // - A GT_FIELD_LIST, or // - A node of a scalar type, passed in a single register or slot // (or two slots in the case of a struct pass on the stack as TYP_DOUBLE). // void checkIsStruct() const { GenTree* node = GetNode(); if (isStruct) { if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST)) { // This is the case where we are passing a struct as a primitive type. // On most targets, this is always a single register or slot. // However, on ARM this could be two slots if it is TYP_DOUBLE. bool isPassedAsPrimitiveType = ((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE))); #ifdef TARGET_ARM if (!isPassedAsPrimitiveType) { if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2)) { isPassedAsPrimitiveType = true; } } #endif // TARGET_ARM assert(isPassedAsPrimitiveType); } } else { assert(!varTypeIsStruct(node)); } } void Dump() const; #endif }; //------------------------------------------------------------------------- // // The class fgArgInfo is used to handle the arguments // when morphing a GT_CALL node. // class fgArgInfo { Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo unsigned argCount; // Updatable arg count value #if defined(DEBUG_ARG_SLOTS) unsigned nextSlotNum; // Updatable slot count value #endif unsigned nextStackByteOffset; unsigned stkLevel; // Stack depth when we make this call (for x86) #if defined(UNIX_X86_ABI) bool alignmentDone; // Updateable flag, set to 'true' after we've done any required alignment. unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs(). unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call. 
// Computed dynamically during codegen, based on stkSizeBytes and the current // stack level (genStackLevel) when the first stack adjustment is made for // this call. #endif #if FEATURE_FIXED_OUT_ARGS unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL #endif unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs) bool hasRegArgs; // true if we have one or more register arguments bool hasStackArgs; // true if we have one or more stack arguments bool argsComplete; // marker for state bool argsSorted; // marker for state bool needsTemps; // one or more arguments must be copied to a temp by EvalArgsToTemps fgArgTabEntry** argTable; // variable sized array of per argument descrption: (i.e. argTable[argTableSize]) private: void AddArg(fgArgTabEntry* curArgTabEntry); public: fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount); fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall); fgArgTabEntry* AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg = false); #ifdef UNIX_AMD64_ABI fgArgTabEntry* AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, const bool isStruct, const bool isFloatHfa, const bool isVararg, const regNumber otherRegNum, const unsigned structIntRegs, const unsigned structFloatRegs, const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr); #endif // UNIX_AMD64_ABI fgArgTabEntry* AddStkArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, unsigned numSlots, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg = false); void RemorphReset(); void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing); void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing); void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots); void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode); void ArgsComplete(); void SortArgs(); void EvalArgsToTemps(); unsigned ArgCount() const { return argCount; } fgArgTabEntry** ArgTable() const { return argTable; } #if defined(DEBUG_ARG_SLOTS) unsigned GetNextSlotNum() const { return nextSlotNum; } #endif unsigned GetNextSlotByteOffset() const { return nextStackByteOffset; } bool HasRegArgs() const { return hasRegArgs; } bool NeedsTemps() const { return needsTemps; } bool HasStackArgs() const { return hasStackArgs; } bool AreArgsComplete() const { return argsComplete; } #if FEATURE_FIXED_OUT_ARGS unsigned GetOutArgSize() const { return outArgSize; } void SetOutArgSize(unsigned newVal) { outArgSize = newVal; } #endif // FEATURE_FIXED_OUT_ARGS #if defined(UNIX_X86_ABI) void ComputeStackAlignment(unsigned curStackLevelInBytes) { padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN); } unsigned GetStkAlign() const { return padStkAlign; } void SetStkSizeBytes(unsigned newStkSizeBytes) { stkSizeBytes = newStkSizeBytes; } unsigned GetStkSizeBytes() const { return stkSizeBytes; } bool IsStkAlignmentDone() const { return alignmentDone; } void SetStkAlignmentDone() { alignmentDone = true; } #endif // defined(UNIX_X86_ABI) // Get the fgArgTabEntry for the arg at position argNum. 
fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const { fgArgTabEntry* curArgTabEntry = nullptr; if (!reMorphing) { // The arg table has not yet been sorted. curArgTabEntry = argTable[argNum]; assert(curArgTabEntry->argNum == argNum); return curArgTabEntry; } for (unsigned i = 0; i < argCount; i++) { curArgTabEntry = argTable[i]; if (curArgTabEntry->argNum == argNum) { return curArgTabEntry; } } noway_assert(!"GetArgEntry: argNum not found"); return nullptr; } void SetNeedsTemps() { needsTemps = true; } // Get the node for the arg at position argIndex. // Caller must ensure that this index is a valid arg index. GenTree* GetArgNode(unsigned argIndex) const { return GetArgEntry(argIndex)->GetNode(); } void Dump(Compiler* compiler) const; }; #ifdef DEBUG // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // We have the ability to mark source expressions with "Test Labels." // These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions // that should be CSE defs, and other expressions that should uses of those defs, with a shared label. enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel. { TL_SsaName, TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown). TL_VNNorm, // Like above, but uses the non-exceptional value of the expression. TL_CSE_Def, // This must be identified in the JIT as a CSE def TL_CSE_Use, // This must be identified in the JIT as a CSE use TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop. }; struct TestLabelAndNum { TestLabel m_tl; ssize_t m_num; TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0) { } }; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap; // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // DEBUG //------------------------------------------------------------------------- // LoopFlags: flags for the loop table. // enum LoopFlags : unsigned short { LPFLG_EMPTY = 0, // LPFLG_UNUSED = 0x0001, // LPFLG_UNUSED = 0x0002, LPFLG_ITER = 0x0004, // loop of form: for (i = icon or lclVar; test_condition(); i++) // LPFLG_UNUSED = 0x0008, LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call LPFLG_VAR_INIT = 0x0020, // iterator is initialized with a local var (var # found in lpVarInit) LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit) LPFLG_SIMD_LIMIT = 0x0080, // iterator is compared with vector element count (found in lpConstLimit) LPFLG_VAR_LIMIT = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit) LPFLG_CONST_LIMIT = 0x0200, // iterator is compared with a constant (found in lpConstLimit) LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit) LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop LPFLG_REMOVED = 0x1000, // has been removed from the loop table (unrolled or optimized away) LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet // type are assigned to. 
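    // Example (illustrative): a counted loop of the form `for (int i = 0; i < n; i++)`, where `n` is a local,
    // would typically have LPFLG_ITER, LPFLG_CONST_INIT and LPFLG_VAR_LIMIT set.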
}; inline constexpr LoopFlags operator~(LoopFlags a) { return (LoopFlags)(~(unsigned short)a); } inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a | (unsigned short)b); } inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a & (unsigned short)b); } inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a | (unsigned short)b); } inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a & (unsigned short)b); } // The following holds information about instr offsets in terms of generated code. enum class IPmappingDscKind { Prolog, // The mapping represents the start of a prolog. Epilog, // The mapping represents the start of an epilog. NoMapping, // This does not map to any IL offset. Normal, // The mapping maps to an IL offset. }; struct IPmappingDsc { emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset IPmappingDscKind ipmdKind; // The kind of mapping ILLocation ipmdLoc; // The location for normal mappings bool ipmdIsLabel; // Can this code be a branch label? }; struct PreciseIPMapping { emitLocation nativeLoc; DebugInfo debugInfo; }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX The big guy. The sections are currently organized as : XX XX XX XX o GenTree and BasicBlock XX XX o LclVarsInfo XX XX o Importer XX XX o FlowGraph XX XX o Optimizer XX XX o RegAlloc XX XX o EEInterface XX XX o TempsInfo XX XX o RegSet XX XX o GCInfo XX XX o Instruction XX XX o ScopeInfo XX XX o PrologScopeInfo XX XX o CodeGenerator XX XX o UnwindInfo XX XX o Compiler XX XX o typeInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ struct HWIntrinsicInfo; class Compiler { friend class emitter; friend class UnwindInfo; friend class UnwindFragmentInfo; friend class UnwindEpilogInfo; friend class JitTimer; friend class LinearScan; friend class fgArgInfo; friend class Rationalizer; friend class Phase; friend class Lowering; friend class CSE_DataFlow; friend class CSE_Heuristic; friend class CodeGenInterface; friend class CodeGen; friend class LclVarDsc; friend class TempDsc; friend class LIR; friend class ObjectAllocator; friend class LocalAddressVisitor; friend struct GenTree; friend class MorphInitBlockHelper; friend class MorphCopyBlockHelper; #ifdef FEATURE_HW_INTRINSICS friend struct HWIntrinsicInfo; #endif // FEATURE_HW_INTRINSICS #ifndef TARGET_64BIT friend class DecomposeLongs; #endif // !TARGET_64BIT /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Misc structs definitions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package. #ifdef DEBUG bool verbose; bool verboseTrees; bool shouldUseVerboseTrees(); bool asciiTrees; // If true, dump trees using only ASCII characters bool shouldDumpASCIITrees(); bool verboseSsa; // If true, produce especially verbose dump output in SSA construction. 
bool shouldUseVerboseSsa(); bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id: int morphNum; // This counts the the trees that have been morphed, allowing us to label each uniquely. bool doExtraSuperPmiQueries; void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep. const char* VarNameToStr(VarName name) { return name; } DWORD expensiveDebugCheckLevel; #endif #if FEATURE_MULTIREG_RET GenTree* impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)); #endif // FEATURE_MULTIREG_RET #ifdef TARGET_X86 bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const; #endif // TARGET_X86 //------------------------------------------------------------------------- // Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64. // HFAs are one to four element structs where each element is the same // type, either all float or all double. We handle HVAs (one to four elements of // vector types) uniformly with HFAs. HFAs are treated specially // in the ARM/ARM64 Procedure Call Standards, specifically, they are passed in // floating-point registers instead of the general purpose registers. // bool IsHfa(CORINFO_CLASS_HANDLE hClass); bool IsHfa(GenTree* tree); var_types GetHfaType(GenTree* tree); unsigned GetHfaCount(GenTree* tree); var_types GetHfaType(CORINFO_CLASS_HANDLE hClass); unsigned GetHfaCount(CORINFO_CLASS_HANDLE hClass); bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv); //------------------------------------------------------------------------- // The following is used for validating format of EH table // struct EHNodeDsc; typedef struct EHNodeDsc* pEHNodeDsc; EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes. EHNodeDsc* ehnNext; // root of the tree comprising the EHnodes. struct EHNodeDsc { enum EHBlockType { TryNode, FilterNode, HandlerNode, FinallyNode, FaultNode }; EHBlockType ehnBlockType; // kind of EH block IL_OFFSET ehnStartOffset; // IL offset of start of the EH block IL_OFFSET ehnEndOffset; // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to // the last IL offset, not "one past the last one", i.e., the range Start to End is // inclusive). 
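        // Example (illustrative): for a simple IL `try { ... } catch { ... }`, the tree contains a TryNode and
        // a HandlerNode; the TryNode's ehnHandlerNode points at the handler and the HandlerNode's ehnTryNode
        // points back at the try (see the union below).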
pEHNodeDsc ehnNext; // next (non-nested) block in sequential order pEHNodeDsc ehnChild; // leftmost nested block union { pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node }; pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0 pEHNodeDsc ehnEquivalent; // if blockType=tryNode, start offset and end offset is same, void ehnSetTryNodeType() { ehnBlockType = TryNode; } void ehnSetFilterNodeType() { ehnBlockType = FilterNode; } void ehnSetHandlerNodeType() { ehnBlockType = HandlerNode; } void ehnSetFinallyNodeType() { ehnBlockType = FinallyNode; } void ehnSetFaultNodeType() { ehnBlockType = FaultNode; } bool ehnIsTryBlock() { return ehnBlockType == TryNode; } bool ehnIsFilterBlock() { return ehnBlockType == FilterNode; } bool ehnIsHandlerBlock() { return ehnBlockType == HandlerNode; } bool ehnIsFinallyBlock() { return ehnBlockType == FinallyNode; } bool ehnIsFaultBlock() { return ehnBlockType == FaultNode; } // returns true if there is any overlap between the two nodes static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2) { if (node1->ehnStartOffset < node2->ehnStartOffset) { return (node1->ehnEndOffset >= node2->ehnStartOffset); } else { return (node1->ehnStartOffset <= node2->ehnEndOffset); } } // fails with BADCODE if inner is not completely nested inside outer static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer) { return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset)); } }; //------------------------------------------------------------------------- // Exception handling functions // #if !defined(FEATURE_EH_FUNCLETS) bool ehNeedsShadowSPslots() { return (info.compXcptnsCount || opts.compDbgEnC); } // 0 for methods with no EH // 1 for methods with non-nested EH, or where only the try blocks are nested // 2 for a method with a catch within a catch // etc. unsigned ehMaxHndNestingCount; #endif // !FEATURE_EH_FUNCLETS static bool jitIsBetween(unsigned value, unsigned start, unsigned end); static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end); bool bbInCatchHandlerILRange(BasicBlock* blk); bool bbInFilterILRange(BasicBlock* blk); bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk); bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk); bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk); bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk); unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo); unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex); unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex); // Returns true if "block" is the start of a try region. bool bbIsTryBeg(BasicBlock* block); // Returns true if "block" is the start of a handler or filter region. bool bbIsHandlerBeg(BasicBlock* block); // Returns true iff "block" is where control flows if an exception is raised in the // try region, and sets "*regionIndex" to the index of the try for the handler. // Differs from "IsHandlerBeg" in the case of filters, where this is true for the first // block of the filter, but not for the filter's handler. bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex); bool ehHasCallableHandlers(); // Return the EH descriptor for the given region index. EHblkDsc* ehGetDsc(unsigned regionIndex); // Return the EH index given a region descriptor. 
unsigned ehGetIndex(EHblkDsc* ehDsc); // Return the EH descriptor index of the enclosing try, for the given region index. unsigned ehGetEnclosingTryIndex(unsigned regionIndex); // Return the EH descriptor index of the enclosing handler, for the given region index. unsigned ehGetEnclosingHndIndex(unsigned regionIndex); // Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this // block is not in a 'try' region). EHblkDsc* ehGetBlockTryDsc(BasicBlock* block); // Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr // if this block is not in a filter or handler region). EHblkDsc* ehGetBlockHndDsc(BasicBlock* block); // Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or // nullptr if this block's exceptions propagate to caller). EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block); EHblkDsc* ehIsBlockTryLast(BasicBlock* block); EHblkDsc* ehIsBlockHndLast(BasicBlock* block); bool ehIsBlockEHLast(BasicBlock* block); bool ehBlockHasExnFlowDsc(BasicBlock* block); // Return the region index of the most nested EH region this block is in. unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion); // Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check. unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex); // Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX // if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' // is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler. // (It can never be a filter.) unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion); // A block has been deleted. Update the EH table appropriately. void ehUpdateForDeletedBlock(BasicBlock* block); // Determine whether a block can be deleted while preserving the EH normalization rules. bool ehCanDeleteEmptyBlock(BasicBlock* block); // Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region. void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast); // For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler, // or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index // is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the // BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function // body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the // BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if lives in the handler region. (It never // lives in a filter.) unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion); // Find the range of basic blocks in which all BBJ_CALLFINALLY will be found that target the 'finallyIndex' region's // handler. Set begBlk to the first block, and endBlk to the block after the last block of the range // (nullptr if the last block is the last block in the program). // Precondition: 'finallyIndex' is the EH region of a try/finally clause. 
void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk); #ifdef DEBUG // Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return // 'true' if the BBJ_CALLFINALLY is in the correct EH region. bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex); #endif // DEBUG #if defined(FEATURE_EH_FUNCLETS) // Do we need a PSPSym in the main function? For codegen purposes, we only need one // if there is a filter that protects a region with a nested EH clause (such as a // try/catch nested in the 'try' body of a try/filter/filter-handler). See // genFuncletProlog() for more details. However, the VM seems to use it for more // purposes, maybe including debugging. Until we are sure otherwise, always create // a PSPSym for functions with any EH. bool ehNeedsPSPSym() const { #ifdef TARGET_X86 return false; #else // TARGET_X86 return compHndBBtabCount > 0; #endif // TARGET_X86 } bool ehAnyFunclets(); // Are there any funclets in this function? unsigned ehFuncletCount(); // Return the count of funclets in the function unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks #else // !FEATURE_EH_FUNCLETS bool ehAnyFunclets() { return false; } unsigned ehFuncletCount() { return 0; } unsigned bbThrowIndex(BasicBlock* blk) { return blk->bbTryIndex; } // Get the index to use as the cache key for sharing throw blocks #endif // !FEATURE_EH_FUNCLETS // Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of // "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the first // first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor, // for example, we want to consider that the immediate dominator of the catch clause start block, so it's // convenient to also consider it a predecessor.) flowList* BlockPredsWithEH(BasicBlock* blk); // This table is useful for memoization of the method above. typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap; BlockToFlowListMap* m_blockToEHPreds; BlockToFlowListMap* GetBlockToEHPreds() { if (m_blockToEHPreds == nullptr) { m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator()); } return m_blockToEHPreds; } void* ehEmitCookie(BasicBlock* block); UNATIVE_OFFSET ehCodeOffset(BasicBlock* block); EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter); EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd); EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter); EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast); void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg); void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast); void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast); void fgSkipRmvdBlocks(EHblkDsc* handlerTab); void fgAllocEHTable(); void fgRemoveEHTableEntry(unsigned XTnum); #if defined(FEATURE_EH_FUNCLETS) EHblkDsc* fgAddEHTableEntry(unsigned XTnum); #endif // FEATURE_EH_FUNCLETS #if !FEATURE_EH void fgRemoveEH(); #endif // !FEATURE_EH void fgSortEHTable(); // Causes the EH table to obey some well-formedness conditions, by inserting // empty BB's when necessary: // * No block is both the first block of a handler and the first block of a try. 
// * No block is the first block of multiple 'try' regions. // * No block is the last block of multiple EH regions. void fgNormalizeEH(); bool fgNormalizeEHCase1(); bool fgNormalizeEHCase2(); bool fgNormalizeEHCase3(); void fgCheckForLoopsInHandlers(); #ifdef DEBUG void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void fgVerifyHandlerTab(); void fgDispHandlerTab(); #endif // DEBUG bool fgNeedToSortEHTable; void verInitEHTree(unsigned numEHClauses); void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab); void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node); void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node); void verCheckNestingLevel(EHNodeDsc* initRoot); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GenTree and BasicBlock XX XX XX XX Functions to allocate and display the GenTrees and BasicBlocks XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Functions to create nodes Statement* gtNewStmt(GenTree* expr = nullptr); Statement* gtNewStmt(GenTree* expr, const DebugInfo& di); // For unary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE); // For binary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2); GenTreeColon* gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode); GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon); GenTree* gtNewLargeOperNode(genTreeOps oper, var_types type = TYP_I_IMPL, GenTree* op1 = nullptr, GenTree* op2 = nullptr); GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT); GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq); GenTreeIntCon* gtNewNull(); GenTreeIntCon* gtNewTrue(); GenTreeIntCon* gtNewFalse(); GenTree* gtNewPhysRegNode(regNumber reg, var_types type); GenTree* gtNewJmpTableNode(); GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant); GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr); GenTreeFlags gtTokenToIconFlags(unsigned token); GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle); GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd); GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd); GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd); GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd); GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue); GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node); GenTree* gtNewLconNode(__int64 value); GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE); GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle); GenTree* gtNewZeroConNode(var_types type); GenTree* gtNewOneConNode(var_types type); GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src); #ifdef FEATURE_SIMD GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize); #endif GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock); GenTree* gtNewPutArgReg(var_types type, GenTree* arg, 
regNumber argReg); GenTree* gtNewBitCastNode(var_types type, GenTree* arg); protected: void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile); public: GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); void gtSetObjGcInfo(GenTreeObj* objNode); GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); GenTree* gtNewBlockVal(GenTree* addr, unsigned size); GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile); GenTreeCall::Use* gtNewCallArgs(GenTree* node); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4); GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args); GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after); GenTreeCall* gtNewCallNode(gtCallTypes callType, CORINFO_METHOD_HANDLE handle, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr); GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup, GenTree* ctxTree, void* compileTimeHandle); GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL); GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type = TYP_I_IMPL); #ifdef FEATURE_SIMD GenTreeSIMD* gtNewSIMDNode( var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); GenTreeSIMD* gtNewSIMDNode(var_types type, GenTree* op1, GenTree* op2, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); void SetOpLclRelatedToSIMDIntrinsic(GenTree* op); #endif #ifdef FEATURE_HW_INTRINSICS GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, GenTree* op4, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree** operands, size_t operandCount, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* 
gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode( var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTree* gtNewSimdAbsNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdBinOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCeilNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAllNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCndSelNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCreateBroadcastNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdDotProdNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdFloorNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdGetElementNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMaxNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMinNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdNarrowNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSqrtNode( 
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSumNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdUnOpNode(genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenLowerNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenUpperNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWithElementNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdZeroNode(var_types type, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode( var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID); CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType); CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType); #endif // FEATURE_HW_INTRINSICS GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset); GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags); GenTreeField* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0); GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp); GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block); GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr); GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock); var_types gtTypeForNullCheck(GenTree* tree); void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block); static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum); static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node); fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx); static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx); GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src); GenTree* gtNewTempAssign(unsigned tmp, GenTree* val, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* gtNewRefCOMfield(GenTree* objPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp, CORINFO_CLASS_HANDLE structType, GenTree* assg); GenTree* gtNewNothingNode(); GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTree* gtUnusedValNode(GenTree* expr); GenTree* gtNewKeepAliveNode(GenTree* op); GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType); GenTreeCast* gtNewCastNodeL(var_types typ, 
GenTree* op1, bool fromUnsigned, var_types castType); GenTreeAllocObj* gtNewAllocObjNode( unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1); GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent); GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree); GenTreeIndir* gtNewMethodTableLookup(GenTree* obj); //------------------------------------------------------------------------ // Other GenTree functions GenTree* gtClone(GenTree* tree, bool complexOK = false); // If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise, // create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with // IntCnses with value `deepVarVal`. GenTree* gtCloneExpr( GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal); // Create a copy of `tree`, optionally adding specifed flags, and optionally mapping uses of local // `varNum` to int constants with value `varVal`. GenTree* gtCloneExpr(GenTree* tree, GenTreeFlags addFlags = GTF_EMPTY, unsigned varNum = BAD_VAR_NUM, int varVal = 0) { return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal); } Statement* gtCloneStmt(Statement* stmt) { GenTree* exprClone = gtCloneExpr(stmt->GetRootNode()); return gtNewStmt(exprClone, stmt->GetDebugInfo()); } // Internal helper for cloning a call GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call, GenTreeFlags addFlags = GTF_EMPTY, unsigned deepVarNum = BAD_VAR_NUM, int deepVarVal = 0); // Create copy of an inline or guarded devirtualization candidate tree. GenTreeCall* gtCloneCandidateCall(GenTreeCall* call); void gtUpdateSideEffects(Statement* stmt, GenTree* tree); void gtUpdateTreeAncestorsSideEffects(GenTree* tree); void gtUpdateStmtSideEffects(Statement* stmt); void gtUpdateNodeSideEffects(GenTree* tree); void gtUpdateNodeOperSideEffects(GenTree* tree); void gtUpdateNodeOperSideEffectsPost(GenTree* tree); // Returns "true" iff the complexity (not formally defined, but first interpretation // is #of nodes in subtree) of "tree" is greater than "limit". // (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used // before they have been set.) bool gtComplexityExceeds(GenTree** tree, unsigned limit); GenTree* gtReverseCond(GenTree* tree); static bool gtHasRef(GenTree* tree, ssize_t lclNum); bool gtHasLocalsWithAddrOp(GenTree* tree); unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz); unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp); void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly); #ifdef DEBUG unsigned gtHashValue(GenTree* tree); GenTree* gtWalkOpEffectiveVal(GenTree* op); #endif void gtPrepareCost(GenTree* tree); bool gtIsLikelyRegVar(GenTree* tree); // Returns true iff the secondNode can be swapped with firstNode. bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode); // Given an address expression, compute its costs and addressing mode opportunities, // and mark addressing mode candidates as GTF_DONT_CSE. // TODO-Throughput - Consider actually instantiating these early, to avoid // having to re-run the algorithm that looks for them (might also improve CQ). 
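    // Marking candidates as GTF_DONT_CSE keeps the address computation intact so that codegen can later
    // fold it into a single addressing mode.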
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type); unsigned gtSetEvalOrder(GenTree* tree); void gtSetStmtInfo(Statement* stmt); // Returns "true" iff "node" has any of the side effects in "flags". bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags); // Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags". bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags); // Appends 'expr' in front of 'list' // 'list' will typically start off as 'nullptr' // when 'list' is non-null a GT_COMMA node is used to insert 'expr' GenTree* gtBuildCommaList(GenTree* list, GenTree* expr); void gtExtractSideEffList(GenTree* expr, GenTree** pList, GenTreeFlags GenTreeFlags = GTF_SIDE_EFFECT, bool ignoreRoot = false); GenTree* gtGetThisArg(GenTreeCall* call); // Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the // static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but // complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing // the given "fldHnd", is such an object pointer. bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd); // Return true if call is a recursive call; return false otherwise. // Note when inlining, this looks for calls back to the root method. bool gtIsRecursiveCall(GenTreeCall* call) { return gtIsRecursiveCall(call->gtCallMethHnd); } bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle) { return (callMethodHandle == impInlineRoot()->info.compMethodHnd); } //------------------------------------------------------------------------- GenTree* gtFoldExpr(GenTree* tree); GenTree* gtFoldExprConst(GenTree* tree); GenTree* gtFoldExprSpecial(GenTree* tree); GenTree* gtFoldBoxNullable(GenTree* tree); GenTree* gtFoldExprCompare(GenTree* tree); GenTree* gtCreateHandleCompare(genTreeOps oper, GenTree* op1, GenTree* op2, CorInfoInlineTypeCheck typeCheckInliningResult); GenTree* gtFoldExprCall(GenTreeCall* call); GenTree* gtFoldTypeCompare(GenTree* tree); GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2); // Options to control behavior of gtTryRemoveBoxUpstreamEffects enum BoxRemovalOptions { BR_REMOVE_AND_NARROW, // remove effects, minimize remaining work, return possibly narrowed source tree BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree BR_REMOVE_BUT_NOT_NARROW, // remove effects, return original source tree BR_DONT_REMOVE, // check if removal is possible, return copy source tree BR_DONT_REMOVE_WANT_TYPE_HANDLE, // check if removal is possible, return type handle tree BR_MAKE_LOCAL_COPY // revise box to copy to temp local and return local's address }; GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW); GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp); //------------------------------------------------------------------------- // Get the handle, if any. CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree); // Get the handle, and assert if not found. CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree); // Get the handle for a ref type. 
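    // On return, *pIsExact is set when the handle is known to be the exact class (not a possible subclass),
    // and *pIsNonNull is set when the tree is known to be non-null.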
CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull); // Get the class handle for an helper call CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull); // Get the element handle for an array of ref type. CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array); // Get a class handle from a helper call argument CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array); // Get the class handle for a field CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull); // Check if this tree is a gc static base helper call bool gtIsStaticGCBaseHelperCall(GenTree* tree); //------------------------------------------------------------------------- // Functions to display the trees #ifdef DEBUG void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR); void gtDispConst(GenTree* tree); void gtDispLeaf(GenTree* tree, IndentStack* indentStack); void gtDispNodeName(GenTree* tree); #if FEATURE_MULTIREG_RET unsigned gtDispMultiRegCount(GenTree* tree); #endif void gtDispRegVal(GenTree* tree); void gtDispZeroFieldSeq(GenTree* tree); void gtDispVN(GenTree* tree); void gtDispCommonEndLine(GenTree* tree); enum IndentInfo { IINone, IIArc, IIArcTop, IIArcBottom, IIEmbedded, IIError, IndentInfoCount }; void gtDispChild(GenTree* child, IndentStack* indentStack, IndentInfo arcType, _In_opt_ const char* msg = nullptr, bool topOnly = false); void gtDispTree(GenTree* tree, IndentStack* indentStack = nullptr, _In_opt_ const char* msg = nullptr, bool topOnly = false, bool isLIR = false); void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut); int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining); char* gtGetLclVarName(unsigned lclNum); void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true); void gtDispLclVarStructType(unsigned lclNum); void gtDispClassLayout(ClassLayout* layout, var_types type); void gtDispILLocation(const ILLocation& loc); void gtDispStmt(Statement* stmt, const char* msg = nullptr); void gtDispBlockStmts(BasicBlock* block); void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength); void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength); void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack); void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq); void gtDispFieldSeq(FieldSeqNode* pfsn); void gtDispRange(LIR::ReadOnlyRange const& range); void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree); void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr); #endif // For tree walks enum fgWalkResult { WALK_CONTINUE, WALK_SKIP_SUBTREES, WALK_ABORT }; struct fgWalkData; typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data); typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data); static fgWalkPreFn gtMarkColonCond; static fgWalkPreFn gtClearColonCond; struct FindLinkData { GenTree* nodeToFind; GenTree** result; GenTree* parent; }; FindLinkData gtFindLink(Statement* stmt, GenTree* node); bool gtHasCatchArg(GenTree* tree); typedef ArrayStack<GenTree*> GenTreeStack; static bool gtHasCallOnStack(GenTreeStack* parentStack); //========================================================================= // BasicBlock functions #ifdef DEBUG // This is a debug flag we will use to assert when creating block 
during codegen // as this interferes with procedure splitting. If you know what you're doing, set // it to true before creating the block. (DEBUG only) bool fgSafeBasicBlockCreation; #endif BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind); void placeLoopAlignInstructions(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LclVarsInfo XX XX XX XX The variables to be used by the code generator. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // // For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will // be placed in the stack frame and it's fields must be laid out sequentially. // // For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by // a local variable that can be enregistered or placed in the stack frame. // The fields do not need to be laid out sequentially // enum lvaPromotionType { PROMOTION_TYPE_NONE, // The struct local is not promoted PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted, // and its field locals are independent of its parent struct local. PROMOTION_TYPE_DEPENDENT // The struct local is promoted, // but its field locals depend on its parent struct local. }; /*****************************************************************************/ enum FrameLayoutState { NO_FRAME_LAYOUT, INITIAL_FRAME_LAYOUT, PRE_REGALLOC_FRAME_LAYOUT, REGALLOC_FRAME_LAYOUT, TENTATIVE_FRAME_LAYOUT, FINAL_FRAME_LAYOUT }; public: RefCountState lvaRefCountState; // Current local ref count state bool lvaLocalVarRefCounted() const { return lvaRefCountState == RCS_NORMAL; } bool lvaTrackedFixed; // true: We cannot add new 'tracked' variable unsigned lvaCount; // total number of locals, which includes function arguments, // special arguments, IL local variables, and JIT temporary variables LclVarDsc* lvaTable; // variable descriptor table unsigned lvaTableCnt; // lvaTable size (>= lvaCount) unsigned lvaTrackedCount; // actual # of locals being tracked unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked #ifdef DEBUG VARSET_TP lvaTrackedVars; // set of tracked variables #endif #ifndef TARGET_64BIT VARSET_TP lvaLongVars; // set of long (64-bit) variables #endif VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices. // It that changes, this changes. VarSets from different epochs // cannot be meaningfully combined. unsigned GetCurLVEpoch() { return lvaCurEpoch; } // reverse map of tracked number to var number unsigned lvaTrackedToVarNumSize; unsigned* lvaTrackedToVarNum; #if DOUBLE_ALIGN #ifdef DEBUG // # of procs compiled a with double-aligned stack static unsigned s_lvaDoubleAlignedProcsCount; #endif #endif // Getters and setters for address-exposed and do-not-enregister local var properties. 
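    // Note: marking a local as address-exposed also forces it to be do-not-enregister; see the setter
    // implementations for the exact set of flags that are updated.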
    bool lvaVarAddrExposed(unsigned varNum) const;
    void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason));
    void lvaSetVarLiveInOutOfHandler(unsigned varNum);
    bool lvaVarDoNotEnregister(unsigned varNum);

    void lvSetMinOptsDoNotEnreg();

    bool lvaEnregEHVars;
    bool lvaEnregMultiRegVars;

    void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason));

    unsigned lvaVarargsHandleArg;
#ifdef TARGET_X86
    unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack
                                      // arguments
#endif                                // TARGET_X86

    unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame
    unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame
#if FEATURE_FIXED_OUT_ARGS
    unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining.
#endif
    unsigned lvaMonAcquired; // boolean variable introduced in synchronized methods
                             // that tracks whether the lock has been taken

    unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg.
                         // However, if there is a "ldarga 0" or "starg 0" in the IL,
                         // we will redirect all "ldarg(a) 0" and "starg 0" to this temp.

    unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression
                                        // in case there are multiple BBJ_RETURN blocks in the inlinee
                                        // or if the inlinee has GC ref locals.

#if FEATURE_FIXED_OUT_ARGS
    unsigned            lvaOutgoingArgSpaceVar;  // dummy TYP_LCLBLK var for fixed outgoing argument space
    PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space
#endif                                           // FEATURE_FIXED_OUT_ARGS

    static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding)
    {
        return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE);
    }

    // Variable representing the return address. The helper-based tailcall
    // mechanism passes the address of the return address to a runtime helper
    // where it is used to detect tail-call chains.
    unsigned lvaRetAddrVar;

#if defined(DEBUG) && defined(TARGET_XARCH)

    unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return.

#endif // defined(DEBUG) && defined(TARGET_XARCH)

#if defined(DEBUG) && defined(TARGET_X86)

    unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call.

#endif // defined(DEBUG) && defined(TARGET_X86)

    bool lvaGenericsContextInUse;

    bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or
                                      // CORINFO_GENERICS_CTXT_FROM_THIS?
    bool lvaReportParamTypeArg();     // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG?

    //-------------------------------------------------------------------------
    // All these frame offsets are inter-related and must be kept in sync

#if !defined(FEATURE_EH_FUNCLETS)
    // This is used for the callable handlers
    unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots
#endif                            // FEATURE_EH_FUNCLETS

    int lvaCachedGenericContextArgOffs;
    int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as
                                            // THIS pointer

#ifdef JIT32_GCENCODER

    unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the last alloca/localloc

#endif // JIT32_GCENCODER

    unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper

    // TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps.
// after the reg predict we will use a computed maxTmpSize // which is based upon the number of spill temps predicted by reg predict // All this is necessary because if we under-estimate the size of the spill // temps we could fail when encoding instructions that reference stack offsets for ARM. // // Pre codegen max spill temp size. static const unsigned MAX_SPILL_TEMP_SIZE = 24; //------------------------------------------------------------------------- unsigned lvaGetMaxSpillTempSize(); #ifdef TARGET_ARM bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask); #endif // TARGET_ARM void lvaAssignFrameOffsets(FrameLayoutState curState); void lvaFixVirtualFrameOffsets(); void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc); void lvaUpdateArgsWithInitialReg(); void lvaAssignVirtualFrameOffsetsToArgs(); #ifdef UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset); #else // !UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs); #endif // !UNIX_AMD64_ABI void lvaAssignVirtualFrameOffsetsToLocals(); int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs); #ifdef TARGET_AMD64 // Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even. bool lvaIsCalleeSavedIntRegCountEven(); #endif void lvaAlignFrame(); void lvaAssignFrameOffsetsToPromotedStructs(); int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign); #ifdef DEBUG void lvaDumpRegLocation(unsigned lclNum); void lvaDumpFrameLocation(unsigned lclNum); void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6); void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame // layout state defined by lvaDoneFrameLayout #endif // Limit frames size to 1GB. The maximum is 2GB in theory - make it intentionally smaller // to avoid bugs from borderline cases. #define MAX_FrameSize 0x3FFFFFFF void lvaIncrementFrameSize(unsigned size); unsigned lvaFrameSize(FrameLayoutState curState); // Returns the caller-SP-relative offset for the SP/FP relative offset determined by FP based. int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const; // Returns the caller-SP-relative offset for the local variable "varNum." int lvaGetCallerSPRelativeOffset(unsigned varNum); // Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc. int lvaGetSPRelativeOffset(unsigned varNum); int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased); int lvaGetInitialSPRelativeOffset(unsigned varNum); // True if this is an OSR compilation and this local is potentially // located on the original method stack frame. 
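    // Such locals are addressed through the original (Tier0) frame, which remains live beneath the OSR frame.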
bool lvaIsOSRLocal(unsigned varNum); //------------------------ For splitting types ---------------------------- void lvaInitTypeRef(); void lvaInitArgs(InitVarDscInfo* varDscInfo); void lvaInitThisPtr(InitVarDscInfo* varDscInfo); void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg); void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs); void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo); void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo); void lvaInitVarDsc(LclVarDsc* varDsc, unsigned varNum, CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd, CORINFO_ARG_LIST_HANDLE varList, CORINFO_SIG_INFO* varSig); static unsigned lvaTypeRefMask(var_types type); var_types lvaGetActualType(unsigned lclNum); var_types lvaGetRealType(unsigned lclNum); //------------------------------------------------------------------------- void lvaInit(); LclVarDsc* lvaGetDesc(unsigned lclNum) { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(unsigned lclNum) const { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar) { return lvaGetDesc(lclVar->GetLclNum()); } unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex) { assert(trackedIndex < lvaTrackedCount); unsigned lclNum = lvaTrackedToVarNum[trackedIndex]; assert(lclNum < lvaCount); return lclNum; } LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex) { return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex)); } unsigned lvaGetLclNum(const LclVarDsc* varDsc) { assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) == 0); // varDsc better not point in the middle of a variable unsigned varNum = (unsigned)(varDsc - lvaTable); assert(varDsc == &lvaTable[varNum]); return varNum; } unsigned lvaLclSize(unsigned varNum); unsigned lvaLclExactSize(unsigned varNum); bool lvaHaveManyLocals() const; unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason)); unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason)); unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason)); void lvaSortByRefCount(); void lvaMarkLocalVars(); // Local variable ref-counting void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers); void lvaMarkLocalVars(BasicBlock* block, bool isRecompute); void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt); #ifdef DEBUG struct lvaStressLclFldArgs { Compiler* m_pCompiler; bool m_bFirstPass; }; static fgWalkPreFn lvaStressLclFldCB; void lvaStressLclFld(); void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars); void lvaDispVarSet(VARSET_VALARG_TP set); #endif #ifdef TARGET_ARM int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage); #else int lvaFrameAddress(int varNum, bool* pFPbased); #endif bool lvaIsParameter(unsigned varNum); bool lvaIsRegArgument(unsigned varNum); bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument? bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code // that writes to arg0 // For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference. // For ARM64, this is structs larger than 16 bytes that are passed by reference. 
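    // (Implicit byref parameters only exist on AMD64 and ARM64; on other targets this always returns false.)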
    bool lvaIsImplicitByRefLocal(unsigned varNum)
    {
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
        LclVarDsc* varDsc = lvaGetDesc(varNum);
        if (varDsc->lvIsImplicitByRef)
        {
            assert(varDsc->lvIsParam);

            assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF));
            return true;
        }
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
        return false;
    }

    // Returns true if this local var is a multireg struct
    bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg);

    // If the local is a TYP_STRUCT, get/set a class handle describing it
    CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum);
    void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true);
    void lvaSetStructUsedAsVarArg(unsigned varNum);

    // If the local is TYP_REF, set or update the associated class information.
    void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
    void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);
    void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
    void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);

#define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct

    // Info about struct type fields.
    struct lvaStructFieldInfo
    {
        CORINFO_FIELD_HANDLE fldHnd;
        unsigned char        fldOffset;
        unsigned char        fldOrdinal;
        var_types            fldType;
        unsigned             fldSize;
        CORINFO_CLASS_HANDLE fldTypeHnd;

        lvaStructFieldInfo()
            : fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr)
        {
        }
    };

    // Info about a struct type, instances of which may be candidates for promotion.
    struct lvaStructPromotionInfo
    {
        CORINFO_CLASS_HANDLE typeHnd;
        bool                 canPromote;
        bool                 containsHoles;
        bool                 customLayout;
        bool                 fieldsSorted;
        unsigned char        fieldCnt;
        lvaStructFieldInfo   fields[MAX_NumOfFieldsInPromotableStruct];

        lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr)
            : typeHnd(typeHnd)
            , canPromote(false)
            , containsHoles(false)
            , customLayout(false)
            , fieldsSorted(false)
            , fieldCnt(0)
        {
        }
    };

    struct lvaFieldOffsetCmp
    {
        bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2);
    };

    // This class is responsible for checking validity and profitability of struct promotion.
    // If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes
    // necessary information for fgMorphStructField to use.
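    // Promotion replaces a struct local's fields with separate field locals when that is legal and profitable.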
class StructPromotionHelper { public: StructPromotionHelper(Compiler* compiler); bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd); bool TryPromoteStructVar(unsigned lclNum); void Clear() { structPromotionInfo.typeHnd = NO_CLASS_HANDLE; } #ifdef DEBUG void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType); #endif // DEBUG private: bool CanPromoteStructVar(unsigned lclNum); bool ShouldPromoteStructVar(unsigned lclNum); void PromoteStructVar(unsigned lclNum); void SortStructFields(); lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal); bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo); private: Compiler* compiler; lvaStructPromotionInfo structPromotionInfo; #ifdef DEBUG typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types> RetypedAsScalarFieldsMap; RetypedAsScalarFieldsMap retypedFieldsMap; #endif // DEBUG }; StructPromotionHelper* structPromotionHelper; unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset); lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetPromotionType(unsigned varNum); lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetParentPromotionType(unsigned varNum); bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc); bool lvaIsGCTracked(const LclVarDsc* varDsc); #if defined(FEATURE_SIMD) bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc) { assert(varDsc->lvType == TYP_SIMD12); assert(varDsc->lvExactSize == 12); #if defined(TARGET_64BIT) assert(compMacOsArm64Abi() || varDsc->lvSize() == 16); #endif // defined(TARGET_64BIT) // We make local variable SIMD12 types 16 bytes instead of just 12. // lvSize() will return 16 bytes for SIMD12, even for fields. // However, we can't do that mapping if the var is a dependently promoted struct field. // Such a field must remain its exact size within its parent struct unless it is a single // field *and* it is the only field in a struct of 16 bytes. if (varDsc->lvSize() != 16) { return false; } if (lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl); return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16); } return true; } #endif // defined(FEATURE_SIMD) unsigned lvaGSSecurityCookie; // LclVar number bool lvaTempsHaveLargerOffsetThanVars(); // Returns "true" iff local variable "lclNum" is in SSA form. bool lvaInSsa(unsigned lclNum) { assert(lclNum < lvaCount); return lvaTable[lclNum].lvInSsa; } unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX #if defined(FEATURE_EH_FUNCLETS) unsigned lvaPSPSym; // variable representing the PSPSym #endif InlineInfo* impInlineInfo; // Only present for inlinees InlineStrategy* m_inlineStrategy; InlineContext* compInlineContext; // Always present // The Compiler* that is the root of the inlining tree of which "this" is a member. Compiler* impInlineRoot(); #if defined(DEBUG) || defined(INLINE_DATA) unsigned __int64 getInlineCycleCount() { return m_compCycles; } #endif // defined(DEBUG) || defined(INLINE_DATA) bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method. bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters this method. 
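    // Illustrative sketch (not part of the API surface): a phase typically introduces and types a new temp
    // via the lvaGrabTemp/lvaGetDesc pair declared in this section, e.g.
    //
    //   unsigned   tmpNum = lvaGrabTemp(true DEBUGARG("example temp"));
    //   LclVarDsc* tmpDsc = lvaGetDesc(tmpNum);
    //   tmpDsc->lvType    = TYP_INT;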
//========================================================================= // PROTECTED //========================================================================= protected: //---------------- Local variable ref-counting ---------------------------- void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute); bool IsDominatedByExceptionalEntry(BasicBlock* block); void SetVolatileHint(LclVarDsc* varDsc); // Keeps the mapping from SSA #'s to VN's for the implicit memory variables. SsaDefArray<SsaMemDef> lvMemoryPerSsaData; public: // Returns the address of the per-Ssa data for memory at the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum) { return lvMemoryPerSsaData.GetSsaDef(ssaNum); } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ private: // For prefixFlags enum { PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix PREFIX_TAILCALL_IMPLICIT = 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix PREFIX_TAILCALL_STRESS = 0x00000100, // call doesn't "tail" IL prefix but is treated as explicit because of tail call stress PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS), PREFIX_VOLATILE = 0x00001000, PREFIX_UNALIGNED = 0x00010000, PREFIX_CONSTRAINED = 0x00100000, PREFIX_READONLY = 0x01000000 }; static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix); static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp); static bool impOpcodeIsCallOpcode(OPCODE opcode); public: void impInit(); void impImport(); CORINFO_CLASS_HANDLE impGetRefAnyClass(); CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle(); CORINFO_CLASS_HANDLE impGetTypeHandleClass(); CORINFO_CLASS_HANDLE impGetStringClass(); CORINFO_CLASS_HANDLE impGetObjectClass(); // Returns underlying type of handles returned by ldtoken instruction var_types GetRuntimeHandleUnderlyingType() { // RuntimeTypeHandle is backed by raw pointer on CoreRT and by object reference on other runtimes return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF; } void impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* contextHandle, CORINFO_CONTEXT_HANDLE* exactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset = BAD_IL_OFFSET); //========================================================================= // PROTECTED //========================================================================= protected: //-------------------- Stack manipulation --------------------------------- unsigned impStkSize; // Size of the full stack #define SMALL_STACK_SIZE 16 // number of elements in impSmallStack struct SavedStack // used to save/restore stack contents. 
{ unsigned ssDepth; // number of values on stack StackEntry* ssTrees; // saved tree values }; bool impIsPrimitive(CorInfoType type); bool impILConsumesAddr(const BYTE* codeAddr); void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind); void impPushOnStack(GenTree* tree, typeInfo ti); void impPushNullObjRefOnStack(); StackEntry impPopStack(); StackEntry& impStackTop(unsigned n = 0); unsigned impStackHeight(); void impSaveStackState(SavedStack* savePtr, bool copy); void impRestoreStackState(SavedStack* savePtr); GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation = false); void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken); void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); bool impCanPInvokeInline(); bool impCanPInvokeInlineCallSite(BasicBlock* block); void impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block); GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo()); void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig); void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); var_types impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a // type parameter? 
GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset); CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle); bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv); GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd); GenTree* impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv); #ifdef DEBUG var_types impImportJitTestLabelMark(int numArgs); #endif // DEBUG GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken); GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp); GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp); static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr); GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp); GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp); void impImportLeave(BasicBlock* block); void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr); GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom); GenTree* impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impExpandHalfConstEquals(GenTreeLclVar* data, GenTree* lengthFld, bool checkForNull, bool startsWith, WCHAR* cnsData, int len, int dataOffset); GenTree* impExpandHalfConstEqualsSWAR(GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset); GenTree* impExpandHalfConstEqualsSIMD(GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset); GenTreeStrCon* impGetStrConFromSpan(GenTree* span); GenTree* impIntrinsic(GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef, bool readonlyCall, bool tailCall, CORINFO_RESOLVED_TOKEN* pContstrainedResolvedToken, CORINFO_THIS_TRANSFORM constraintCallThisTransform, NamedIntrinsic* pIntrinsicName, bool* isSpecialIntrinsic = nullptr); GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall); NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method); GenTree* impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); #ifdef FEATURE_HW_INTRINSICS GenTree* impHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, GenTree* newobjThis); protected: bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa); GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, var_types retType, CorInfoType simdBaseJitType, unsigned simdSize, GenTree* newobjThis); GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE argClass, bool expectAddr = false, GenTree* newobjThis = nullptr); GenTree* impNonConstFallback(NamedIntrinsic 
intrinsic, var_types simdType, CorInfoType simdBaseJitType); GenTree* addRangeCheckIfNeeded( NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound); GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound); #ifdef TARGET_XARCH GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); #endif // TARGET_XARCH #endif // FEATURE_HW_INTRINSICS GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName); GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive); GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform); //----------------- Manipulating the trees and stmts ---------------------- Statement* impStmtList; // Statements for the BB being imported. Statement* impLastStmt; // The last statement for the current BB. public: enum { CHECK_SPILL_ALL = -1, CHECK_SPILL_NONE = -2 }; void impBeginTreeList(); void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt); void impEndTreeList(BasicBlock* block); void impAppendStmtCheck(Statement* stmt, unsigned chkLevel); void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true); void impAppendStmt(Statement* stmt); void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore); Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true); void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore); void impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel = (unsigned)CHECK_SPILL_NONE, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); void impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); Statement* impExtractLastStmt(); GenTree* impCloneExpr(GenTree* tree, GenTree** clone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)); GenTree* impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impAssignStructPtr(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref); var_types 
impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr); GenTree* impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization = false); GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false, bool importParent = false); GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false) { return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true); } GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind); GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args = nullptr, CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr); bool impIsCastHelperEligibleForClassProbe(GenTree* tree); bool impIsCastHelperMayHaveProfileData(GenTree* tree); GenTree* impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset); GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass); bool VarTypeIsMultiByteAndCanEnreg(var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg, CorInfoCallConvExtension callConv); bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName); bool IsTargetIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(GenTree* tree); private: //----------------- Importing the method ---------------------------------- CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens. #ifdef DEBUG unsigned impCurOpcOffs; const char* impCurOpcName; bool impNestedStackSpill; // For displaying instrs with generated native code (-n:B) Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset(). void impNoteLastILoffs(); #endif // Debug info of current statement being imported. It gets set to contain // no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been // set in the appended trees. Then it gets updated at IL instructions for // which we have to report mapping info. // It will always contain the current inline context. 
DebugInfo impCurStmtDI; DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall); void impCurStmtOffsSet(IL_OFFSET offs); void impNoteBranchOffs(); unsigned impInitBlockLineInfo(); bool impIsThis(GenTree* obj); bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsAnySTLOC(OPCODE opcode) { return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) || ((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3))); } GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr); bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const; GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0); //---------------- Spilling the importer stack ---------------------------- // The maximum number of bytes of IL processed without clean stack state. // It allows to limit the maximum tree size and depth. static const unsigned MAX_TREE_SIZE = 200; bool impCanSpillNow(OPCODE prevOpcode); struct PendingDsc { PendingDsc* pdNext; BasicBlock* pdBB; SavedStack pdSavedStack; ThisInitState pdThisPtrInit; }; PendingDsc* impPendingList; // list of BBs currently waiting to be imported. PendingDsc* impPendingFree; // Freed up dscs that can be reused // We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation. JitExpandArray<BYTE> impPendingBlockMembers; // Return the byte for "b" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. BYTE impGetPendingBlockMember(BasicBlock* blk) { return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd()); } // Set the byte for "b" to "val" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. void impSetPendingBlockMember(BasicBlock* blk, BYTE val) { impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val); } bool impCanReimport; bool impSpillStackEntry(unsigned level, unsigned varNum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ); void impSpillStackEnsure(bool spillLeaves = false); void impEvalSideEffects(); void impSpillSpecialSideEff(); void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)); void impSpillValueClasses(); void impSpillEvalStack(); static fgWalkPreFn impFindValueClasses; void impSpillLclRefs(ssize_t lclNum); BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter); bool impBlockIsInALoop(BasicBlock* block); void impImportBlockCode(BasicBlock* block); void impReimportMarkBlock(BasicBlock* block); void impReimportMarkSuccessors(BasicBlock* block); void impVerifyEHBlock(BasicBlock* block, bool isTryStart); void impImportBlockPending(BasicBlock* block); // Similar to impImportBlockPending, but assumes that block has already been imported once and is being // reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState // for the block, but instead, just re-uses the block's existing EntryState. void impReimportBlockPending(BasicBlock* block); var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2); void impImportBlock(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. 
We will assign the values // on the stack to local variables (the "spill temp" variables). The successor blocks will assume that // its incoming stack contents are in those locals. This requires "block" and its successors to agree on // the variables that will be used -- and for all the predecessors of those successors, and the // successors of those predecessors, etc. Call such a set of blocks closed under alternating // successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the // clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill // temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series // of local variable numbers, so we represent them with the base local variable number), returns that. // Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of // which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps // chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending // on which kind of member of the clique the block is). unsigned impGetSpillTmpBase(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We have previously // assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks // will assume that its incoming stack contents are in those locals. This requires "block" and its // successors to agree on the variables and their types that will be used. The CLI spec allows implicit // conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can // push an int and another can push a native int. For 64-bit we have chosen to implement this by typing // the "spill temp" as native int, and then importing (or re-importing as needed) so that all the // predecessors in the "spill clique" push a native int (sign-extending if needed), and all the // successors receive a native int. Similarly float and double are unified to double. // This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark // blocks for re-importation as appropriate (both successors, so they get the right incoming type, and // predecessors, so they insert an upcast if needed). void impReimportSpillClique(BasicBlock* block); // When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic // block, and represent the predecessor and successor members of the clique currently being computed. // *** Access to these will need to be locked in a parallel compiler. 
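    // A small hypothetical illustration of the above (block and temp numbers are invented and are
    // not the output of any particular compilation):
    //
    //   B1 completes with [int]        on the stack and spills it to temp V10
    //   B2 completes with [native int] on the stack and spills it to temp V10
    //   B3 is a successor of both and reads its incoming stack entry from V10
    //
    // {B1, B2, B3} form a single spill clique and share the spill temp base chosen by
    // impGetSpillTmpBase. When the int / native int mismatch is detected, impReimportSpillClique
    // retypes V10 as native int and schedules the clique members for re-importation, so that B1
    // re-imports and widens (sign-extends) the value it pushes.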
    JitExpandArray<BYTE> impSpillCliquePredMembers;
    JitExpandArray<BYTE> impSpillCliqueSuccMembers;

    enum SpillCliqueDir
    {
        SpillCliquePred,
        SpillCliqueSucc
    };

    // Abstract class for receiving a callback while walking a spill clique
    class SpillCliqueWalker
    {
    public:
        virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0;
    };

    // This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique
    class SetSpillTempsBase : public SpillCliqueWalker
    {
        unsigned m_baseTmp;

    public:
        SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp)
        {
        }
        virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
    };

    // This class is used for implementing impReimportSpillClique part on each block within the spill clique
    class ReimportSpillClique : public SpillCliqueWalker
    {
        Compiler* m_pComp;

    public:
        ReimportSpillClique(Compiler* pComp) : m_pComp(pComp)
        {
        }
        virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
    };

    // This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each
    // predecessor or successor within the spill clique
    void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback);

    // For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the
    // incoming locals. This walks that list and resets the types of the GenTrees to match the types of
    // the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique).
    void impRetypeEntryStateTemps(BasicBlock* blk);

    BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk);
    void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val);

    void impPushVar(GenTree* op, typeInfo tiRetVal);
    GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset));
    void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal);
    void impLoadVar(unsigned lclNum, IL_OFFSET offset)
    {
        impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo);
    }
    void impLoadArg(unsigned ilArgNum, IL_OFFSET offset);
    void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset);
    bool impReturnInstruction(int prefixFlags, OPCODE& opcode);

#ifdef TARGET_ARM
    void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass);
#endif

    // A free list of linked list nodes used to represent to-do stacks of basic blocks.
    struct BlockListNode
    {
        BasicBlock*    m_blk;
        BlockListNode* m_next;
        BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next)
        {
        }
        void* operator new(size_t sz, Compiler* comp);
    };

    BlockListNode* impBlockListNodeFreeList;

    void FreeBlockListNode(BlockListNode* node);

    bool impIsValueType(typeInfo* pTypeInfo);
    var_types mangleVarArgsType(var_types type);

    regNumber getCallArgIntRegister(regNumber floatReg);
    regNumber getCallArgFloatRegister(regNumber intReg);

#if defined(DEBUG)
    static unsigned jitTotalMethodCompiled;
#endif

#ifdef DEBUG
    static LONG jitNestingLevel;
#endif // DEBUG

    static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr);

    void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult);

    // STATIC inlining decision based on the IL code.
void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult); void impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult); void impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult); void impInlineInitVars(InlineInfo* pInlineInfo); unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)); GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo); bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo); bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo); void impMarkInlineCandidate(GenTree* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); void impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); bool impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv); bool impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive); bool impIsImplicitTailCallCandidate( OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive); bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd); bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array); CORINFO_RESOLVED_TOKEN* impAllocateToken(const CORINFO_RESOLVED_TOKEN& token); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX FlowGraph XX XX XX XX Info about the basic-blocks, their contents and the flow analysis XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: BasicBlock* fgFirstBB; // Beginning of the basic block list BasicBlock* fgLastBB; // End of the basic block list BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section BasicBlock* fgEntryBB; // For OSR, the original method's entry point BasicBlock* fgOSREntryBB; // For OSR, the logical entry point (~ patchpoint) #if defined(FEATURE_EH_FUNCLETS) BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets) #endif BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been // created. 
BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks unsigned fgEdgeCount; // # of control flow edges between the BBs unsigned fgBBcount; // # of BBs in the method #ifdef DEBUG unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen #endif unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute // dominance. Indexed by block number. Size: fgBBNumMax + 1. // After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute // dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and // postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered // starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely // to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array // index). The arrays are of size fgBBNumMax + 1. unsigned* fgDomTreePreOrder; unsigned* fgDomTreePostOrder; // Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree // in order to avoid the need for SSA reconstruction and an "out of SSA" phase). DomTreeNode* fgSsaDomTree; bool fgBBVarSetsInited; // Allocate array like T* a = new T[fgBBNumMax + 1]; // Using helper so we don't keep forgetting +1. template <typename T> T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown) { return getAllocator(cmk).allocate<T>(fgBBNumMax + 1); } // BlockSets are relative to a specific set of BasicBlock numbers. If that changes // (if the blocks are renumbered), this changes. BlockSets from different epochs // cannot be meaningfully combined. Note that new blocks can be created with higher // block numbers without changing the basic block epoch. These blocks *cannot* // participate in a block set until the blocks are all renumbered, causing the epoch // to change. This is useful if continuing to use previous block sets is valuable. // If the epoch is zero, then it is uninitialized, and block sets can't be used. unsigned fgCurBBEpoch; unsigned GetCurBasicBlockEpoch() { return fgCurBBEpoch; } // The number of basic blocks in the current epoch. When the blocks are renumbered, // this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains // the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered. unsigned fgCurBBEpochSize; // The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize // bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called. unsigned fgBBSetCountInSizeTUnits; void NewBasicBlockEpoch() { INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits); // We have a new epoch. Compute and cache the size needed for new BlockSets. fgCurBBEpoch++; fgCurBBEpochSize = fgBBNumMax + 1; fgBBSetCountInSizeTUnits = roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8); #ifdef DEBUG // All BlockSet objects are now invalid! fgReachabilitySetsValid = false; // the bbReach sets are now invalid! fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid! 
if (verbose) { unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t)); printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)", fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long"); if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1))) { // If we're not just establishing the first epoch, and the epoch array size has changed such that we're // going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an // array of size_t bitsets), then print that out. printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long"); } printf("\n"); } #endif // DEBUG } void EnsureBasicBlockEpoch() { if (fgCurBBEpochSize != fgBBNumMax + 1) { NewBasicBlockEpoch(); } } BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind); void fgEnsureFirstBBisScratch(); bool fgFirstBBisScratch(); bool fgBBisScratch(BasicBlock* block); void fgExtendEHRegionBefore(BasicBlock* block); void fgExtendEHRegionAfter(BasicBlock* block); BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, bool putInFilter = false, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, BasicBlock* srcBlk, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind); BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind, BasicBlock* afterBlk, unsigned xcptnIndex, bool putInTryRegion); void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk); void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk); void fgUnlinkBlock(BasicBlock* block); #ifdef FEATURE_JIT_METHOD_PERF unsigned fgMeasureIR(); #endif // FEATURE_JIT_METHOD_PERF bool fgModified; // True if the flow graph has been modified recently bool fgComputePredsDone; // Have we computed the bbPreds list bool fgCheapPredsValid; // Is the bbCheapPreds list valid? bool fgDomsComputed; // Have we computed the dominator sets? bool fgReturnBlocksComputed; // Have we computed the return blocks list? bool fgOptimizedFinally; // Did we optimize any try-finallys? bool fgHasSwitch; // any BBJ_SWITCH jumps? BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler // begin blocks. #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BlockSet fgAlwaysBlks; // Set of blocks which are BBJ_ALWAYS part of BBJ_CALLFINALLY/BBJ_ALWAYS pair that should // never be removed due to a requirement to use the BBJ_ALWAYS for generating code and // not have "retless" blocks. #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG bool fgReachabilitySetsValid; // Are the bbReach sets valid? bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid? #endif // DEBUG bool fgRemoveRestOfBlock; // true if we know that we will throw bool fgStmtRemoved; // true if we remove statements -> need new DFA // There are two modes for ordering of the trees. // - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in // each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order) // by traversing the tree according to the order of the operands. 
    // - In FGOrderLinear, the dominant ordering is the linear order.

    enum FlowGraphOrder
    {
        FGOrderTree,
        FGOrderLinear
    };
    FlowGraphOrder fgOrder;

    // The following are boolean flags that keep track of the state of internal data structures

    bool fgStmtListThreaded;       // true if the node list is now threaded
    bool fgCanRelocateEHRegions;   // true if we are allowed to relocate the EH regions
    bool fgEdgeWeightsComputed;    // true after we have called fgComputeEdgeWeights
    bool fgHaveValidEdgeWeights;   // true if we were successful in computing all of the edge weights
    bool fgSlopUsedInEdgeWeights;  // true if there was some slop used when computing the edge weights
    bool fgRangeUsedInEdgeWeights; // true if some of the edge weights are expressed in Min..Max form
    bool fgNeedsUpdateFlowGraph;   // true if we need to run fgUpdateFlowGraph

    weight_t fgCalledCount; // count of the number of times this method was called
                            // This is derived from the profile data
                            // or is BB_UNITY_WEIGHT when we don't have profile data

#if defined(FEATURE_EH_FUNCLETS)
    bool fgFuncletsCreated; // true if the funclet creation phase has been run
#endif                      // FEATURE_EH_FUNCLETS

    bool fgGlobalMorph; // indicates if we are in the global morphing phase
                        // since fgMorphTree can be called from several places

    bool     impBoxTempInUse; // the temp below is valid and available
    unsigned impBoxTemp;      // a temporary that is used for boxing

#ifdef DEBUG
    bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert,
                             // and we are trying to compile again in a "safer", minopts mode?
#endif

#if defined(DEBUG)
    unsigned impInlinedCodeSize;
    bool     fgPrintInlinedMethods;
#endif

    jitstd::vector<flowList*>* fgPredListSortVector;

    //-------------------------------------------------------------------------

    void fgInit();

    PhaseStatus fgImport();
    PhaseStatus fgTransformIndirectCalls();
    PhaseStatus fgTransformPatchpoints();
    PhaseStatus fgInline();
    PhaseStatus fgRemoveEmptyTry();
    PhaseStatus fgRemoveEmptyFinally();
    PhaseStatus fgMergeFinallyChains();
    PhaseStatus fgCloneFinally();

    void fgCleanupContinuation(BasicBlock* continuation);

#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
    PhaseStatus fgUpdateFinallyTargetFlags();
    void fgClearAllFinallyTargetBits();
    void fgAddFinallyTargetFlags();
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)

    PhaseStatus fgTailMergeThrows();
    void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,
                                            BasicBlock* nonCanonicalBlock,
                                            BasicBlock* canonicalBlock,
                                            flowList*   predEdge);
    void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock,
                                       BasicBlock* nonCanonicalBlock,
                                       BasicBlock* canonicalBlock,
                                       flowList*   predEdge);

    GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType);

#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
    // Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals
    // when this is necessary.
bool fgNeedToAddFinallyTargetBits; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, BasicBlock* handler, BlockToBlockMap& continuationMap); GenTree* fgGetCritSectOfStaticMethod(); #if defined(FEATURE_EH_FUNCLETS) void fgAddSyncMethodEnterExit(); GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter); void fgConvertSyncReturnToLeave(BasicBlock* block); #endif // FEATURE_EH_FUNCLETS void fgAddReversePInvokeEnterExit(); bool fgMoreThanOneReturnBlock(); // The number of separate return points in the method. unsigned fgReturnCount; void fgAddInternal(); enum class FoldResult { FOLD_DID_NOTHING, FOLD_CHANGED_CONTROL_FLOW, FOLD_REMOVED_LAST_STMT, FOLD_ALTERED_LAST_STMT, }; FoldResult fgFoldConditional(BasicBlock* block); void fgMorphStmts(BasicBlock* block); void fgMorphBlocks(); void fgMergeBlockReturn(BasicBlock* block); bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)); void fgSetOptions(); #ifdef DEBUG static fgWalkPreFn fgAssertNoQmark; void fgPreExpandQmarkChecks(GenTree* expr); void fgPostExpandQmarkChecks(); static void fgCheckQmarkAllowedForm(GenTree* tree); #endif IL_OFFSET fgFindBlockILOffset(BasicBlock* block); void fgFixEntryFlowForOSR(); BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr); BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr); BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt); BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di); Statement* fgNewStmtFromTree(GenTree* tree); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block); Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di); GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr); void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt); void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt); void fgExpandQmarkNodes(); // Do "simple lowering." This functionality is (conceptually) part of "general" // lowering that is distributed between fgMorph and the lowering phase of LSRA. 
void fgSimpleLowering(); GenTree* fgInitThisClass(); GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper); GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls); bool backendRequiresLocalVarLifetimes() { return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars(); } void fgLocalVarLiveness(); void fgLocalVarLivenessInit(); void fgPerNodeLocalVarLiveness(GenTree* node); void fgPerBlockLocalVarLiveness(); VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block); void fgLiveVarAnalysis(bool updateInternalOnly = false); void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call); void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeTrackedLocalDef(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeUntrackedLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* lclVarNode); bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode); void fgComputeLife(VARSET_TP& life, GenTree* startNode, GenTree* endNode, VARSET_VALARG_TP volatileVars, bool* pStmtInfoDirty DEBUGARG(bool* treeModf)); void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars); bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange); void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block); bool fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_VALARG_TP life, bool* doAgain, bool* pStmtInfoDirty, bool* pStoreRemoved DEBUGARG(bool* treeModf)); void fgInterBlockLocalVarLiveness(); // Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.: // 1. for (BasicBlock* const block : compiler->Blocks()) ... // 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ... // 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ... // In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3), // both `startBlock` and `endBlock` must be non-null. // BasicBlockSimpleList Blocks() const { return BasicBlockSimpleList(fgFirstBB); } BasicBlockSimpleList Blocks(BasicBlock* startBlock) const { return BasicBlockSimpleList(startBlock); } BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const { return BasicBlockRangeList(startBlock, endBlock); } // The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name // of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose // whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us // to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree. typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap; NodeToUnsignedMap* m_opAsgnVarDefSsaNums; NodeToUnsignedMap* GetOpAsgnVarDefSsaNums() { if (m_opAsgnVarDefSsaNums == nullptr) { m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator()); } return m_opAsgnVarDefSsaNums; } // This map tracks nodes whose value numbers explicitly or implicitly depend on memory states. // The map provides the entry block of the most closely enclosing loop that // defines the memory region accessed when defining the nodes's VN. 
// // This information should be consulted when considering hoisting node out of a loop, as the VN // for the node will only be valid within the indicated loop. // // It is not fine-grained enough to track memory dependence within loops, so cannot be used // for more general code motion. // // If a node does not have an entry in the map we currently assume the VN is not memory dependent // and so memory does not constrain hoisting. // typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap() { if (m_nodeToLoopMemoryBlockMap == nullptr) { m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator()); } return m_nodeToLoopMemoryBlockMap; } void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN); void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree); // Requires value numbering phase to have completed. Returns the value number ("gtVN") of the // "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the // "use" VN. Performs a lookup into the map of (use asg tree -> def VN.) to return the "def's" // VN. inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree); // Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl". // Except: assumes that lcl is a def, and if it is // a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def", // rather than the "use" SSA number recorded in the tree "lcl". inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl); inline bool PreciseRefCountsRequired(); // Performs SSA conversion. void fgSsaBuild(); // Reset any data structures to the state expected by "fgSsaBuild", so it can be run again. void fgResetForSsa(); unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run. // Returns "true" if this is a special variable that is never zero initialized in the prolog. inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum); // Returns "true" if the variable needs explicit zero initialization. inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn); // The value numbers for this compilation. ValueNumStore* vnStore; public: ValueNumStore* GetValueNumStore() { return vnStore; } // Do value numbering (assign a value number to each // tree node). void fgValueNumber(); // Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN. // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // The 'indType' is the indirection type of the lhs of the assignment and will typically // match the element type of the array or fldSeq. When this type doesn't match // or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN] // ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, FieldSeqNode* fldSeq, ValueNum rhsVN, var_types indType); // Requires that "tree" is a GT_IND marked as an array index, and that its address argument // has been parsed to yield the other input arguments. If evaluation of the address // can raise exceptions, those should be captured in the exception set "addrXvnp". // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique // VN for the conservative VN.) 
Also marks the tree's argument as the address of an array element. // The type tree->TypeGet() will typically match the element type of the array or fldSeq. // When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN // ValueNum fgValueNumberArrIndexVal(GenTree* tree, CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, ValueNumPair addrXvnp, FieldSeqNode* fldSeq); // Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown // by evaluating the array index expression "tree". Returns the value number resulting from // dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the // "GT_IND" that does the dereference, and it is given the returned value number. ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp); // Compute the value number for a byref-exposed load of the given type via the given pointerVN. ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN); unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run. // Utility functions for fgValueNumber. // Perform value-numbering for the trees in "blk". void fgValueNumberBlock(BasicBlock* blk); // Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the // innermost loop of which "entryBlock" is the entry. Returns the value number that should be // assumed for the memoryKind at the start "entryBlk". ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum); // Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated. // As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation. void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg)); // Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be // mutated. void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg)); // For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap. // As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store. void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg)); // For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's MemorySsaMap. void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg)); void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN); // Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that // value in that SSA #. void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree); // The input 'tree' is a leaf node that is a constant // Assign the proper value number to the tree void fgValueNumberTreeConst(GenTree* tree); // If the VN store has been initialized, reassign the // proper value number to the constant tree. void fgUpdateConstTreeValueNumber(GenTree* tree); // Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree. // (With some exceptions: the VN of the lhs of an assignment is assigned as part of the // assignment.) void fgValueNumberTree(GenTree* tree); void fgValueNumberAssignment(GenTreeOp* tree); // Does value-numbering for a block assignment. 
void fgValueNumberBlockAssignment(GenTree* tree); bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src); // Does value-numbering for a cast tree. void fgValueNumberCastTree(GenTree* tree); // Does value-numbering for an intrinsic tree. void fgValueNumberIntrinsic(GenTree* tree); #ifdef FEATURE_SIMD // Does value-numbering for a GT_SIMD tree void fgValueNumberSimd(GenTreeSIMD* tree); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS // Does value-numbering for a GT_HWINTRINSIC tree void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree); #endif // FEATURE_HW_INTRINSICS // Does value-numbering for a call. We interpret some helper calls. void fgValueNumberCall(GenTreeCall* call); // Does value-numbering for a helper representing a cast operation. void fgValueNumberCastHelper(GenTreeCall* call); // Does value-numbering for a helper "call" that has a VN function symbol "vnf". void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc); // Requires "helpCall" to be a helper call. Assigns it a value number; // we understand the semantics of some of the calls. Returns "true" if // the call may modify the heap (we assume arbitrary memory side effects if so). bool fgValueNumberHelperCall(GenTreeCall* helpCall); // Requires that "helpFunc" is one of the pure Jit Helper methods. // Returns the corresponding VNFunc to use for value numbering VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc); // Adds the exception set for the current tree node which has a memory indirection operation void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr); // Adds the exception sets for the current tree node which is performing a division or modulus operation void fgValueNumberAddExceptionSetForDivision(GenTree* tree); // Adds the exception set for the current tree node which is performing a overflow checking operation void fgValueNumberAddExceptionSetForOverflow(GenTree* tree); // Adds the exception set for the current tree node which is performing a bounds check operation void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree); // Adds the exception set for the current tree node which is performing a ckfinite operation void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree); // Adds the exception sets for the current tree node void fgValueNumberAddExceptionSet(GenTree* tree); #ifdef DEBUG void fgDebugCheckExceptionSets(); void fgDebugCheckValueNumberedTree(GenTree* tree); #endif // These are the current value number for the memory implicit variables while // doing value numbering. These are the value numbers under the "liberal" interpretation // of memory values; the "conservative" interpretation needs no VN, since every access of // memory yields an unknown value. ValueNum fgCurMemoryVN[MemoryKindCount]; // Return a "pseudo"-class handle for an array element type. If "elemType" is TYP_STRUCT, // requires "elemStructType" to be non-null (and to have a low-order zero). Otherwise, low order bit // is 1, and the rest is an encoding of "elemTyp". static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType) { if (elemStructType != nullptr) { assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF || varTypeIsIntegral(elemTyp)); assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid. 
            return elemStructType;
        }
        else
        {
            assert(elemTyp != TYP_STRUCT);
            elemTyp = varTypeToSigned(elemTyp);
            return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1);
        }
    }

    // If "clsHnd" is the result of an "EncodeElemType" call for a primitive element type, returns the
    // var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption that "clsHnd" is
    // the struct type of the element).
    static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd)
    {
        size_t clsHndVal = size_t(clsHnd);
        if (clsHndVal & 0x1)
        {
            return var_types(clsHndVal >> 1);
        }
        else
        {
            return TYP_STRUCT;
        }
    }

    // Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types
    var_types getJitGCType(BYTE gcType);

    // Returns true if the provided type should be treated as a primitive type
    // for the unmanaged calling conventions.
    bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd);

    enum structPassingKind
    {
        SPK_Unknown,       // Invalid value, never returned
        SPK_PrimitiveType, // The struct is passed/returned using a primitive type.
        SPK_EnclosingType, // Like SPK_PrimitiveType, but used for return types that
                           // require a primitive type temp that is larger than the struct size.
                           // Currently used for structs of size 3, 5, 6, or 7 bytes.
        SPK_ByValue,       // The struct is passed/returned by value (using the ABI rules)
                           // for ARM64 and UNIX_X64 in multiple registers. (when all of the
                           // parameters registers are used, then the stack will be used)
                           // for X86 passed on the stack, for ARM32 passed in registers
                           // or the stack or split between registers and the stack.
        SPK_ByValueAsHfa,  // The struct is passed/returned as an HFA in multiple registers.
        SPK_ByReference
    }; // The struct is passed/returned by reference to a copy/buffer.

    // Get the "primitive" type that is used when we are given a struct of size 'structSize'.
    // For pointer sized structs the 'clsHnd' is used to determine if the struct contains GC ref.
    // A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double
    // If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned.
    //
    // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
    // hfa types.
    //
    var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg);

    // Get the type that is used to pass values of the given struct type.
    // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
    // hfa types.
    //
    var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
                                  structPassingKind*   wbPassStruct,
                                  bool                 isVarArg,
                                  unsigned             structSize);

    // Get the type that is used to return values of the given struct type.
    // If the size is unknown, pass 0 and it will be determined from 'clsHnd'.
    var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE     clsHnd,
                                     CorInfoCallConvExtension callConv,
                                     structPassingKind*       wbPassStruct = nullptr,
                                     unsigned                 structSize   = 0);

#ifdef DEBUG
    // Print a representation of "vnp" or "vn" on standard output.
    // If "level" is non-zero, we also print out a partial expansion of the value.
    void vnpPrint(ValueNumPair vnp, unsigned level);
    void vnPrint(ValueNum vn, unsigned level);
#endif

    bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2

    // Dominator computation member functions
    // Not exposed outside Compiler
protected:
    bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2

    // Compute immediate dominators, the dominator tree and its pre/post-order traversal numbers.
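    // As a hypothetical illustration of the pre/post-order numbering (tree shape invented, not taken
    // from any particular method): for a dominator tree A -> {B, C}, C -> {D}, a DFS could assign
    //   preOrder:  A=1, B=2, C=3, D=4      postOrder: B=1, D=2, C=3, A=4
    // "A dominates D" holds because preOrder(A) <= preOrder(D) && postOrder(A) >= postOrder(D),
    // while "B dominates D" fails the postOrder check.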
void fgComputeDoms(); void fgCompDominatedByExceptionalEntryBlocks(); BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block. // Note: this is relatively slow compared to calling fgDominate(), // especially if dealing with a single block versus block check. void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.) void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks. void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'. bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets. void fgComputeReachability(); // Perform flow graph node reachability analysis. BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets. void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be // processed in topological sort, this function takes care of that. void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count); BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph. // Returns this as a set. INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds. DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph // (performed by fgComputeDoms), this procedure builds the dominance tree represented // adjacency lists. // In order to speed up the queries of the form 'Does A dominates B', we can perform a DFS preorder and postorder // traversal of the dominance tree and the dominance query will become A dominates B iif preOrder(A) <= preOrder(B) // && postOrder(A) >= postOrder(B) making the computation O(1). void fgNumberDomTree(DomTreeNode* domTree); // When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets, // dominators, and possibly loops. void fgUpdateChangedFlowGraph(const bool computePreds = true, const bool computeDoms = true, const bool computeReturnBlocks = false, const bool computeLoops = false); public: // Compute the predecessors of the blocks in the control flow graph. void fgComputePreds(); // Remove all predecessor information. void fgRemovePreds(); // Compute the cheap flow graph predecessors lists. This is used in some early phases // before the full predecessors lists are computed. void fgComputeCheapPreds(); private: void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred); void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred); public: enum GCPollType { GCPOLL_NONE, GCPOLL_CALL, GCPOLL_INLINE }; // Initialize the per-block variable sets (used for liveness analysis). void fgInitBlockVarSets(); PhaseStatus fgInsertGCPolls(); BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block); // Requires that "block" is a block that returns from // a finally. Returns the number of successors (jump targets of // of blocks in the covered "try" that did a "LEAVE".) unsigned fgNSuccsOfFinallyRet(BasicBlock* block); // Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from // a finally. Returns its "i"th successor (jump targets of // of blocks in the covered "try" that did a "LEAVE".) // Requires that "i" < fgNSuccsOfFinallyRet(block). 
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i); private: // Factor out common portions of the impls of the methods above. void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres); public: // For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement, // skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.) // SwitchUniqueSuccSet contains the non-duplicated switch targets. // (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget, // which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already // been computed for the switch block. If a switch block is deleted or is transformed into a non-switch, // we leave the entry associated with the block, but it will no longer be accessed.) struct SwitchUniqueSuccSet { unsigned numDistinctSuccs; // Number of distinct targets of the switch. BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target // successors. // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation. void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); }; typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap; private: // Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow // iteration over only the distinct successors. BlockToSwitchDescMap* m_switchDescMap; public: BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true) { if ((m_switchDescMap == nullptr) && createIfNull) { m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator()); } return m_switchDescMap; } // Invalidate the map of unique switch block successors. For example, since the hash key of the map // depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that // we don't accidentally look up and return the wrong switch data. void InvalidateUniqueSwitchSuccMap() { m_switchDescMap = nullptr; } // Requires "switchBlock" to be a block that ends in a switch. Returns // the corresponding SwitchUniqueSuccSet. SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk); // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); // Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap. 
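    // For illustration only (block numbers invented): a switch whose jump table is
    // { BB02, BB03, BB02, BB04 } gets a SwitchUniqueSuccSet with numDistinctSuccs == 3 and
    // nonDuplicates == { BB02, BB03, BB04 }, so a flow analysis can visit each distinct target once:
    //   SwitchUniqueSuccSet sd = GetDescriptorForSwitch(switchBlk);
    //   for (unsigned i = 0; i < sd.numDistinctSuccs; i++) { /* visit sd.nonDuplicates[i] */ }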
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk); BasicBlock* fgFirstBlockOfHandler(BasicBlock* block); bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred); flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred); flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred); void fgRemoveBlockAsPred(BasicBlock* block); void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock); void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred); flowList* fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, flowList* oldEdge = nullptr, bool initializingPreds = false); // Only set to 'true' when we are computing preds in // fgComputePreds() void fgFindBasicBlocks(); bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt); bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion); BasicBlock* fgFindInsertPoint(unsigned regionIndex, bool putInTryRegion, BasicBlock* startBlk, BasicBlock* endBlk, BasicBlock* nearBlk, BasicBlock* jumpBlk, bool runRarely); unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr); void fgPostImportationCleanup(); void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false)); void fgUnlinkStmt(BasicBlock* block, Statement* stmt); bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt); void fgCreateLoopPreHeader(unsigned lnum); void fgUnreachableBlock(BasicBlock* block); void fgRemoveConditionalJump(BasicBlock* block); BasicBlock* fgLastBBInMainFunction(); BasicBlock* fgEndBBAfterMainFunction(); void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd); void fgRemoveBlock(BasicBlock* block, bool unreachable); bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext); BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst); bool fgRenumberBlocks(); bool fgExpandRarelyRunBlocks(); bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter); void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk); enum FG_RELOCATE_TYPE { FG_RELOCATE_TRY, // relocate the 'try' region FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary) }; BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType); #if defined(FEATURE_EH_FUNCLETS) #if defined(TARGET_ARM) void fgClearFinallyTargetBit(BasicBlock* block); #endif // defined(TARGET_ARM) bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block); bool fgAnyIntraHandlerPreds(BasicBlock* block); void fgInsertFuncletPrologBlock(BasicBlock* block); void fgCreateFuncletPrologBlocks(); void fgCreateFunclets(); #else // !FEATURE_EH_FUNCLETS bool fgRelocateEHRegions(); #endif // !FEATURE_EH_FUNCLETS bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target); bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum); bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum); bool fgOptimizeEmptyBlock(BasicBlock* block); bool 
fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest); bool fgOptimizeBranch(BasicBlock* bJump); bool fgOptimizeSwitchBranches(BasicBlock* block); bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev); bool fgOptimizeSwitchJumps(); #ifdef DEBUG void fgPrintEdgeWeights(); #endif void fgComputeBlockAndEdgeWeights(); weight_t fgComputeMissingBlockWeights(); void fgComputeCalledCount(weight_t returnWeight); void fgComputeEdgeWeights(); bool fgReorderBlocks(); PhaseStatus fgDetermineFirstColdBlock(); bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr); bool fgUpdateFlowGraph(bool doTailDup = false); void fgFindOperOrder(); // method that returns if you should split here typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data); void fgSetBlockOrder(); void fgRemoveReturnBlock(BasicBlock* block); /* Helper code that has been factored out */ inline void fgConvertBBToThrowBB(BasicBlock* block); bool fgCastNeeded(GenTree* tree, var_types toType); GenTree* fgDoNormalizeOnStore(GenTree* tree); GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry); // The following check for loops that don't execute calls bool fgLoopCallMarked; void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB); void fgLoopCallMark(); void fgMarkLoopHead(BasicBlock* block); unsigned fgGetCodeEstimate(BasicBlock* block); #if DUMP_FLOWGRAPHS enum class PhasePosition { PrePhase, PostPhase }; const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map); static void fgDumpTree(FILE* fgxFile, GenTree* const tree); FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type); bool fgDumpFlowGraph(Phases phase, PhasePosition pos); #endif // DUMP_FLOWGRAPHS #ifdef DEBUG void fgDispDoms(); void fgDispReach(); void fgDispBBLiveness(BasicBlock* block); void fgDispBBLiveness(); void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0); void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees); void fgDispBasicBlocks(bool dumpTrees = false); void fgDumpStmtTree(Statement* stmt, unsigned bbNum); void fgDumpBlock(BasicBlock* block); void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock); static fgWalkPreFn fgStress64RsltMulCB; void fgStress64RsltMul(); void fgDebugCheckUpdate(); void fgDebugCheckBBNumIncreasing(); void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true); void fgDebugCheckBlockLinks(); void fgDebugCheckLinks(bool morphTrees = false); void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees); void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt); void fgDebugCheckNodesUniqueness(); void fgDebugCheckLoopTable(); void fgDebugCheckFlags(GenTree* tree); void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags); void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags); void fgDebugCheckTryFinallyExits(); void fgDebugCheckProfileData(); bool fgDebugCheckIncomingProfileData(BasicBlock* block); bool fgDebugCheckOutgoingProfileData(BasicBlock* block); #endif // DEBUG static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2); static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2); static GenTree* fgGetFirstNode(GenTree* tree); //--------------------- Walking the trees in the IR ----------------------- struct fgWalkData { Compiler* compiler; fgWalkPreFn* wtprVisitorFn; fgWalkPostFn* wtpoVisitorFn; void* pCallbackData; // 
user-provided data GenTree* parent; // parent of current node, provided to callback GenTreeStack* parentStack; // stack of parent nodes, if asked for bool wtprLclsOnly; // whether to only visit lclvar nodes #ifdef DEBUG bool printModified; // callback can use this #endif }; fgWalkResult fgWalkTreePre(GenTree** pTree, fgWalkPreFn* visitor, void* pCallBackData = nullptr, bool lclVarsOnly = false, bool computeStack = false); fgWalkResult fgWalkTree(GenTree** pTree, fgWalkPreFn* preVisitor, fgWalkPostFn* postVisitor, void* pCallBackData = nullptr); void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData); //----- Postorder fgWalkResult fgWalkTreePost(GenTree** pTree, fgWalkPostFn* visitor, void* pCallBackData = nullptr, bool computeStack = false); // An fgWalkPreFn that looks for expressions that have inline throws in // minopts mode. Basically it looks for tress with gtOverflowEx() or // GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It // returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assumes flags // properly propagated to parent trees). It returns WALK_CONTINUE // otherwise. static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data); /************************************************************************** * PROTECTED *************************************************************************/ protected: friend class SsaBuilder; friend struct ValueNumberState; //--------------------- Detect the basic blocks --------------------------- BasicBlock** fgBBs; // Table of pointers to the BBs void fgInitBBLookup(); BasicBlock* fgLookupBB(unsigned addr); bool fgCanSwitchToOptimized(); void fgSwitchToOptimized(const char* reason); bool fgMayExplicitTailCall(); void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock); void fgLinkBasicBlocks(); unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgCheckBasicBlockControlFlow(); void fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, bool IsLeave = false /* is the src a leave block */); bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling); void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining); void fgAdjustForAddressExposedOrWrittenThis(); unsigned fgStressBBProf() { #ifdef DEBUG unsigned result = JitConfig.JitStressBBProf(); if (result == 0) { if (compStressCompile(STRESS_BB_PROFILE, 15)) { result = 1; } } return result; #else return 0; #endif } bool fgHaveProfileData(); bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight); Instrumentor* fgCountInstrumentor; Instrumentor* fgClassInstrumentor; PhaseStatus fgPrepareToInstrumentMethod(); PhaseStatus fgInstrumentMethod(); PhaseStatus fgIncorporateProfileData(); void fgIncorporateBlockCounts(); void fgIncorporateEdgeCounts(); CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema, UINT32 countSchemaItems, BYTE* pInstrumentationData, int32_t ilOffset, CLRRandom* random); public: const char* fgPgoFailReason; bool fgPgoDisabled; ICorJitInfo::PgoSource fgPgoSource; ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema; BYTE* fgPgoData; UINT32 fgPgoSchemaCount; HRESULT fgPgoQueryResult; UINT32 fgNumProfileRuns; UINT32 fgPgoBlockCounts; 
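    // Note: fgPgoBlockCounts above and the fgPgo* counters below are summary statistics gathered
    // while reading back the profile data: fgPgoSchema/fgPgoData describe the raw instrumentation
    // records, and the counters record how many records of each kind were seen. Illustrative
    // sketch only, not part of this class (the ICorJitInfo schema field and kind names used here
    // are assumptions of the sketch):
    //
    //   for (UINT32 i = 0; i < fgPgoSchemaCount; i++)
    //   {
    //       if (fgPgoSchema[i].InstrumentationKind == ICorJitInfo::PgoInstrumentationKind::BasicBlockIntCount)
    //       {
    //           fgPgoBlockCounts++;
    //       }
    //   }
    //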
UINT32 fgPgoEdgeCounts; UINT32 fgPgoClassProfiles; unsigned fgPgoInlineePgo; unsigned fgPgoInlineeNoPgo; unsigned fgPgoInlineeNoPgoSingleBlock; void WalkSpanningTree(SpanningTreeVisitor* visitor); void fgSetProfileWeight(BasicBlock* block, weight_t weight); void fgApplyProfileScale(); bool fgHaveSufficientProfileData(); bool fgHaveTrustedProfileData(); // fgIsUsingProfileWeights - returns true if we have real profile data for this method // or if we have some fake profile data for the stress mode bool fgIsUsingProfileWeights() { return (fgHaveProfileData() || fgStressBBProf()); } // fgProfileRunsCount - returns total number of scenario runs for the profile data // or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data. unsigned fgProfileRunsCount() { return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED; } //-------- Insert a statement at the start or end of a basic block -------- #ifdef DEBUG public: static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true); #endif public: Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt); Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); private: void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt); void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt); void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt); public: void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt); private: Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList); // Create a new temporary variable to hold the result of *ppTree, // and transform the graph accordingly. GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr); GenTree* fgMakeMultiUse(GenTree** ppTree); private: // Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node. GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree); bool fgOperIsBitwiseRotationRoot(genTreeOps oper); #if !defined(TARGET_64BIT) // Recognize and morph a long multiplication with 32 bit operands. GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul); GenTreeOp* fgMorphLongMul(GenTreeOp* mul); #endif //-------- Determine the order in which the trees will be evaluated ------- unsigned fgTreeSeqNum; GenTree* fgTreeSeqLst; GenTree* fgTreeSeqBeg; GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false); void fgSetTreeSeqHelper(GenTree* tree, bool isLIR); void fgSetTreeSeqFinish(GenTree* tree, bool isLIR); void fgSetStmtSeq(Statement* stmt); void fgSetBlockOrder(BasicBlock* block); //------------------------- Morphing -------------------------------------- unsigned fgPtrArgCntMax; public: //------------------------------------------------------------------------ // fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This value is calculated during morph. // // Return Value: // Returns fgPtrArgCntMax, that is a private field. 
// unsigned fgGetPtrArgCntMax() const { return fgPtrArgCntMax; } //------------------------------------------------------------------------ // fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations. // void fgSetPtrArgCntMax(unsigned argCntMax) { fgPtrArgCntMax = argCntMax; } bool compCanEncodePtrArgCntMax(); private: hashBv* fgOutgoingArgTemps; hashBv* fgCurrentlyInUseArgTemps; void fgSetRngChkTarget(GenTree* tree, bool delay = true); BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay); #if REARRANGE_ADDS void fgMoveOpsLeft(GenTree* tree); #endif bool fgIsCommaThrow(GenTree* tree, bool forFolding = false); bool fgIsThrow(GenTree* tree); bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2); bool fgIsBlockCold(BasicBlock* block); GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper); GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true); GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs); // A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address, // it is useful to know whether the address will be immediately dereferenced, or whether the address value will // be used, perhaps by passing it as an argument to a called method. This affects how null checking is done: // for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we // know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that // all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently // small; hence the other fields of MorphAddrContext. enum MorphAddrContextKind { MACK_Ind, MACK_Addr, }; struct MorphAddrContext { MorphAddrContextKind m_kind; bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between // top-level indirection and here have been constants. size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true. // In that case, is the sum of those constant offsets. MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0) { } }; // A MACK_CopyBlock context is immutable, so we can just make one of these and share it. static MorphAddrContext s_CopyBlockMAC; #ifdef FEATURE_SIMD GenTree* getSIMDStructFromField(GenTree* tree, CorInfoType* simdBaseJitTypeOut, unsigned* indexOut, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic = false); GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree); GenTree* fgMorphFieldToSimdGetElement(GenTree* tree); bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt); void impMarkContiguousSIMDFieldAssignments(Statement* stmt); // fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking previous simd field assignment // in function: Complier::impMarkContiguousSIMDFieldAssignments. 
Statement* fgPreviousCandidateSIMDFieldAsgStmt; #endif // FEATURE_SIMD GenTree* fgMorphArrayIndex(GenTree* tree); GenTree* fgMorphExpandCast(GenTreeCast* tree); GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl); void fgInitArgInfo(GenTreeCall* call); GenTreeCall* fgMorphArgs(GenTreeCall* call); void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass); GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph); public: bool fgAddrCouldBeNull(GenTree* addr); private: GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac); bool fgCanFastTailCall(GenTreeCall* call, const char** failReason); #if FEATURE_FASTTAILCALL bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee); #endif bool fgCheckStmtAfterTailCall(); GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help); bool fgCanTailCallViaJitHelper(); void fgMorphTailCallViaJitHelper(GenTreeCall* call); GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd); GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle); GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent); GenTree* fgMorphPotentialTailCall(GenTreeCall* call); GenTree* fgGetStubAddrArg(GenTreeCall* call); unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry); void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall); Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint); GenTree* fgMorphCall(GenTreeCall* call); GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call); void fgMorphCallInline(GenTreeCall* call, InlineResult* result); void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); #if DEBUG void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call); static fgWalkPreFn fgFindNonInlineCandidate; #endif GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call, CORINFO_CONTEXT_HANDLE* ExactContextHnd, CORINFO_RESOLVED_TOKEN* ldftnToken); GenTree* fgMorphLeaf(GenTree* tree); void fgAssignSetVarDef(GenTree* tree); GenTree* fgMorphOneAsgBlockOp(GenTree* tree); GenTree* fgMorphInitBlock(GenTree* tree); GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize); GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false); GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd); GenTree* fgMorphCopyBlock(GenTree* tree); GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree); GenTree* fgMorphForRegisterFP(GenTree* tree); GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr); GenTree* fgOptimizeCast(GenTreeCast* cast); GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp); GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp); #ifdef FEATURE_HW_INTRINSICS GenTree* 
fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node); #endif GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree); GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp); GenTree* fgOptimizeAddition(GenTreeOp* add); GenTree* fgOptimizeMultiply(GenTreeOp* mul); GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp); GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects); GenTree* fgMorphRetInd(GenTreeUnOp* tree); GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree); GenTree* fgMorphSmpOpOptional(GenTreeOp* tree); GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp); GenTree* fgMorphConst(GenTree* tree); bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2); GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true); GenTreeOp* fgMorphCommutative(GenTreeOp* tree); GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree); GenTree* fgMorphReduceAddOps(GenTree* tree); public: GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr); private: void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)); void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)); void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0)); Statement* fgMorphStmt; unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be // used when morphing big offset. //----------------------- Liveness analysis ------------------------------- VARSET_TP fgCurUseSet; // vars used by block (before an assignment) VARSET_TP fgCurDefSet; // vars assigned by block (before a use) MemoryKindSet fgCurMemoryUse; // True iff the current basic block uses memory. MemoryKindSet fgCurMemoryDef; // True iff the current basic block modifies memory. MemoryKindSet fgCurMemoryHavoc; // True if the current basic block is known to set memory to a "havoc" value. bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points. void fgMarkUseDef(GenTreeLclVarCommon* tree); void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope); void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope); void fgExtendDbgScopes(); void fgExtendDbgLifetimes(); #ifdef DEBUG void fgDispDebugScopes(); #endif // DEBUG //------------------------------------------------------------------------- // // The following keeps track of any code we've added for things like array // range checking or explicit calls to enable GC, and so on. // public: struct AddCodeDsc { AddCodeDsc* acdNext; BasicBlock* acdDstBlk; // block to which we jump unsigned acdData; SpecialCodeKind acdKind; // what kind of a special block is this? #if !FEATURE_FIXED_OUT_ARGS bool acdStkLvlInit; // has acdStkLvl value been already set? unsigned acdStkLvl; // stack level in stack slots. 
#endif // !FEATURE_FIXED_OUT_ARGS }; private: static unsigned acdHelper(SpecialCodeKind codeKind); AddCodeDsc* fgAddCodeList; bool fgAddCodeModf; bool fgRngChkThrowAdded; AddCodeDsc* fgExcptnTargetCache[SCK_COUNT]; BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind); BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind); public: AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData); bool fgUseThrowHelperBlocks(); AddCodeDsc* fgGetAdditionalCodeDescriptors() { return fgAddCodeList; } private: bool fgIsCodeAdded(); bool fgIsThrowHlpBlk(BasicBlock* block); #if !FEATURE_FIXED_OUT_ARGS unsigned fgThrowHlpBlkStkLevel(BasicBlock* block); #endif // !FEATURE_FIXED_OUT_ARGS unsigned fgBigOffsetMorphingTemps[TYP_COUNT]; unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo); void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); void fgInsertInlineeBlocks(InlineInfo* pInlineInfo); Statement* fgInlinePrependStatements(InlineInfo* inlineInfo); void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt); #if FEATURE_MULTIREG_RET GenTree* fgGetStructAsStructPtr(GenTree* tree); GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); #endif // FEATURE_MULTIREG_RET static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder; static fgWalkPostFn fgLateDevirtualization; #ifdef DEBUG static fgWalkPreFn fgDebugCheckInlineCandidates; void CheckNoTransformableIndirectCallsRemain(); static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls; #endif void fgPromoteStructs(); void fgMorphStructField(GenTree* tree, GenTree* parent); void fgMorphLocalField(GenTree* tree, GenTree* parent); // Reset the refCount for implicit byrefs. void fgResetImplicitByRefRefCount(); // Change implicit byrefs' types from struct to pointer, and for any that were // promoted, create new promoted struct temps. void fgRetypeImplicitByRefArgs(); // Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection). bool fgMorphImplicitByRefArgs(GenTree* tree); GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr); // Clear up annotations for any struct promotion temps created for implicit byrefs. void fgMarkDemotedImplicitByRefArgs(); void fgMarkAddressExposedLocals(); void fgMarkAddressExposedLocals(Statement* stmt); PhaseStatus fgForwardSub(); bool fgForwardSubBlock(BasicBlock* block); bool fgForwardSubStatement(Statement* statement); static fgWalkPreFn fgUpdateSideEffectsPre; static fgWalkPostFn fgUpdateSideEffectsPost; // The given local variable, required to be a struct variable, is being assigned via // a "lclField", to make it masquerade as an integral type in the ABI. Make sure that // the variable is not enregistered, and is therefore not promoted independently. 
void fgLclFldAssign(unsigned lclNum); static fgWalkPreFn gtHasLocalsWithAddrOpCB; enum TypeProducerKind { TPK_Unknown = 0, // May not be a RuntimeType TPK_Handle = 1, // RuntimeType via handle TPK_GetType = 2, // RuntimeType via Object.get_Type() TPK_Null = 3, // Tree value is null TPK_Other = 4 // RuntimeType via other means }; TypeProducerKind gtGetTypeProducerKind(GenTree* tree); bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call); bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr); bool gtIsActiveCSE_Candidate(GenTree* tree); bool fgIsBigOffset(size_t offset); bool fgNeedReturnSpillTemp(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: void optInit(); GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt); GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt); void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt); protected: // Do hoisting for all loops. void optHoistLoopCode(); // To represent sets of VN's that have already been hoisted in outer loops. typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet; struct LoopHoistContext { private: // The set of variables hoisted in the current loop (or nullptr if there are none). VNSet* m_pHoistedInCurLoop; public: // Value numbers of expressions that have been hoisted in parent loops in the loop nest. VNSet m_hoistedInParentLoops; // Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest. // Previous decisions on loop-invariance of value numbers in the current loop. VNSet m_curLoopVnInvariantCache; VNSet* GetHoistedInCurLoop(Compiler* comp) { if (m_pHoistedInCurLoop == nullptr) { m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist()); } return m_pHoistedInCurLoop; } VNSet* ExtractHoistedInCurLoop() { VNSet* res = m_pHoistedInCurLoop; m_pHoistedInCurLoop = nullptr; return res; } LoopHoistContext(Compiler* comp) : m_pHoistedInCurLoop(nullptr) , m_hoistedInParentLoops(comp->getAllocatorLoopHoist()) , m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist()) { } }; // Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it. // Tracks the expressions that have been hoisted by containing loops by temporarily recording their // value numbers in "m_hoistedInParentLoops". This set is not modified by the call. void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt); // Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.) // Assumes that expressions have been hoisted in containing loops if their value numbers are in // "m_hoistedInParentLoops". // void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt); // Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable) // outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted // expressions to "hoistInLoop". 
void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext); // Return true if the tree looks profitable to hoist out of loop 'lnum'. bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum); // Performs the hoisting 'tree' into the PreHeader for loop 'lnum' void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt); // Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum". // Constants and init values are always loop invariant. // VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop. bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs); // If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop // in the loop table. bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum); // Records the set of "side effects" of all loops: fields (object instance and static) // written to, and SZ-array element type equivalence classes updated. void optComputeLoopSideEffects(); #ifdef DEBUG bool optAnyChildNotRemoved(unsigned loopNum); #endif // DEBUG // Mark a loop as removed. void optMarkLoopRemoved(unsigned loopNum); private: // Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop, // including all nested loops, and records the set of "side effects" of the loop: fields (object instance and // static) written to, and SZ-array element type equivalence classes updated. void optComputeLoopNestSideEffects(unsigned lnum); // Given a loop number 'lnum' mark it and any nested loops as having 'memoryHavoc' void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc); // Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part. // Returns false if we encounter a block that is not marked as being inside a loop. // bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk); // Hoist the expression "expr" out of loop "lnum". void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum); public: void optOptimizeBools(); public: PhaseStatus optInvertLoops(); // Invert loops so they're entered at top and tested at bottom. PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method PhaseStatus optSetBlockWeights(); PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table void optFindLoops(); PhaseStatus optCloneLoops(); void optCloneLoop(unsigned loopInd, LoopCloneContext* context); void optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight); PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info) void optRemoveRedundantZeroInits(); protected: // This enumeration describes what is killed by a call. enum callInterf { CALLINT_NONE, // no interference (most helpers) CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ) CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ) CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT) CALLINT_ALL, // kills everything (normal method call) }; enum class FieldKindForVN { SimpleStatic, WithBaseAddr }; public: // A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in // bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered" // in bbNext order; we use comparisons on the bbNum to decide order.) 
// The blocks that define the body are // top <= entry <= bottom // The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a // single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at // Compiler::optFindNaturalLoops(). struct LoopDsc { BasicBlock* lpHead; // HEAD of the loop (not part of the looping of the loop) -- has ENTRY as a successor. BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext // order) reachable in this loop. BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM) BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP) BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM) callInterf lpAsgCall; // "callInterf" for calls in the loop ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked) varRefKinds lpAsgInds : 8; // set of inds modified within the loop LoopFlags lpFlags; unsigned char lpExitCnt; // number of exits from the loop unsigned char lpParent; // The index of the most-nested loop that completely contains this one, // or else BasicBlock::NOT_IN_LOOP if no such loop exists. unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists. // (Actually, an "immediately" nested loop -- // no other child of this loop is a parent of lpChild.) unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent, // or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop // by following "lpChild" then "lpSibling" links. bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary // memory side effects. If this is set, the fields below // may not be accurate (since they become irrelevant.) VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop // The following counts are used for hoisting profitability checks. int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been // hoisted int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been // hoisted int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN> FieldHandleSet; FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified // in the loop. typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet; ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that // arrays of that type are modified // in the loop. 
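    // Illustrative sketch, not part of this struct: the lpParent/lpChild/lpSibling indices above
    // describe the loop nest, so the immediate children of loop "lnum" can be visited as:
    //
    //   for (unsigned char child = optLoopTable[lnum].lpChild; child != BasicBlock::NOT_IN_LOOP;
    //        child = optLoopTable[child].lpSibling)
    //   {
    //       // ... optLoopTable[child] is an immediate child of optLoopTable[lnum] ...
    //   }
    //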
// Adds the variable liveness information for 'blk' to 'this' LoopDsc void AddVariableLiveness(Compiler* comp, BasicBlock* blk); inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles // (shifted left, with a low-order bit set to distinguish.) // Use the {Encode/Decode}ElemType methods to construct/destruct these. inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd); /* The following values are set only for iterator loops, i.e. has the flag LPFLG_ITER set */ GenTree* lpIterTree; // The "i = i <op> const" tree unsigned lpIterVar() const; // iterator variable # int lpIterConst() const; // the constant with which the iterator is incremented genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.) void VERIFY_lpIterTree() const; var_types lpIterOperType() const; // For overflow instructions // Set to the block where we found the initialization for LPFLG_CONST_INIT or LPFLG_VAR_INIT loops. // Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block. BasicBlock* lpInitBlock; union { int lpConstInit; // initial constant value of iterator // : Valid if LPFLG_CONST_INIT unsigned lpVarInit; // initial local var number to which we initialize the iterator // : Valid if LPFLG_VAR_INIT }; // The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var") GenTree* lpTestTree; // pointer to the node containing the loop test genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE, // etc.) void VERIFY_lpTestTree() const; bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition GenTree* lpIterator() const; // the iterator node in the loop test GenTree* lpLimit() const; // the limit node in the loop test // Limit constant value of iterator - loop condition is "i RELOP const" // : Valid if LPFLG_CONST_LIMIT int lpConstLimit() const; // The lclVar # in the loop condition ( "i RELOP lclVar" ) // : Valid if LPFLG_VAR_LIMIT unsigned lpVarLimit() const; // The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" ) // : Valid if LPFLG_ARRLEN_LIMIT bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const; // Returns "true" iff this is a "top entry" loop. bool lpIsTopEntry() const { if (lpHead->bbNext == lpEntry) { assert(lpHead->bbFallsThrough()); assert(lpTop == lpEntry); return true; } else { return false; } } // Returns "true" iff "*this" contains the blk. bool lpContains(BasicBlock* blk) const { return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops // to be equal, but requiring bottoms to be different.) bool lpContains(BasicBlock* top, BasicBlock* bottom) const { return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring // bottoms to be different.) bool lpContains(const LoopDsc& lp2) const { return lpContains(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is (properly) contained by the range [top, bottom] // (allowing tops to be equal, but requiring bottoms to be different.) 
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const { return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum; } // Returns "true" iff "*this" is (properly) contained by "lp2" // (allowing tops to be equal, but requiring bottoms to be different.) bool lpContainedBy(const LoopDsc& lp2) const { return lpContainedBy(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is disjoint from the range [top, bottom]. bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const { return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum; } // Returns "true" iff "*this" is disjoint from "lp2". bool lpDisjoint(const LoopDsc& lp2) const { return lpDisjoint(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff the loop is well-formed (see code for defn). bool lpWellFormed() const { return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum && (lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum); } #ifdef DEBUG void lpValidatePreHeader() const { // If this is called, we expect there to be a pre-header. assert(lpFlags & LPFLG_HAS_PREHEAD); // The pre-header must unconditionally enter the loop. assert(lpHead->GetUniqueSucc() == lpEntry); // The loop block must be marked as a pre-header. assert(lpHead->bbFlags & BBF_LOOP_PREHEADER); // The loop entry must have a single non-loop predecessor, which is the pre-header. // We can't assume here that the bbNum are properly ordered, so we can't do a simple lpContained() // check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`. } #endif // DEBUG // LoopBlocks: convenience method for enabling range-based `for` iteration over all the // blocks in a loop, e.g.: // for (BasicBlock* const block : loop->LoopBlocks()) ... // Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order // from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered // to be part of the loop. // BasicBlockRangeList LoopBlocks() const { return BasicBlockRangeList(lpTop, lpBottom); } }; protected: bool fgMightHaveLoop(); // returns true if there are any back edges bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability public: LoopDsc* optLoopTable; // loop descriptor table unsigned char optLoopCount; // number of tracked loops unsigned char loopAlignCandidates; // number of loops identified for alignment // Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or // loop table pointers from the previous epoch are invalid. // TODO: validate this in some way? unsigned optCurLoopEpoch; void NewLoopEpoch() { ++optCurLoopEpoch; JITDUMP("New loop epoch %d\n", optCurLoopEpoch); } #ifdef DEBUG unsigned char loopsAligned; // number of loops actually aligned #endif // DEBUG bool optRecordLoop(BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt); void optClearLoopIterInfo(); #ifdef DEBUG void optPrintLoopInfo(unsigned lnum, bool printVerbose = false); void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false); void optPrintLoopTable(); #endif protected: unsigned optCallCount; // number of calls made in the method unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method unsigned optLoopsCloned; // number of loops cloned in the current method. 
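    // Illustrative sketch of the bbNum-range queries on LoopDsc above, using hypothetical loop
    // table indices and block numbers (not part of this class):
    //
    //   const LoopDsc& outer = optLoopTable[i]; // suppose lpTop->bbNum == 3, lpBottom->bbNum == 10
    //   const LoopDsc& inner = optLoopTable[j]; // suppose lpTop->bbNum == 3, lpBottom->bbNum == 7
    //   assert(outer.lpContains(inner));        // equal tops are allowed; bottoms must differ
    //   assert(inner.lpContainedBy(outer));
    //   assert(!outer.lpDisjoint(inner));
    //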
#ifdef DEBUG void optCheckPreds(); #endif void optResetLoopInfo(); void optFindAndScaleGeneralLoopBlocks(); // Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads. void optMarkLoopHeads(); void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false); bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt); unsigned optIsLoopIncrTree(GenTree* incr); bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar); bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar); bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar); bool optExtractInitTestIncr( BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr); void optFindNaturalLoops(); void optIdentifyLoopsForAlignment(); // Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' -- // each loop has a unique "top." Returns "true" iff the flowgraph has been modified. bool optCanonicalizeLoopNest(unsigned char loopInd); // Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top," // unshared with any other loop. Returns "true" iff the flowgraph has been modified bool optCanonicalizeLoop(unsigned char loopInd); // Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP". // Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP". // Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2". // A loop contains itself. bool optLoopContains(unsigned l1, unsigned l2) const; // Updates the loop table by changing loop "loopInd", whose head is required // to be "from", to be "to". Also performs this transformation for any // loop nested in "loopInd" that shares the same head as "loopInd". void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to); void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false); // Marks the containsCall information to "lnum" and any parent loops. void AddContainsCallAllContainingLoops(unsigned lnum); // Adds the variable liveness information from 'blk' to "lnum" and any parent loops. void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk); // Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops. void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // Adds "elemType" to the set of modified array element types of "lnum" and any parent loops. void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType); // Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone // of "from".) Copies the jump destination from "from" to "to". void optCopyBlkDest(BasicBlock* from, BasicBlock* to); // Returns true if 'block' is an entry block for any loop in 'optLoopTable' bool optIsLoopEntry(BasicBlock* block) const; // The depth of the loop described by "lnum" (an index into the loop table.) 
(0 == top level) unsigned optLoopDepth(unsigned lnum) { assert(lnum < optLoopCount); unsigned depth = 0; while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP) { ++depth; } return depth; } // Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score. struct OptInvertCountTreeInfoType { int sharedStaticHelperCount; int arrayLengthCount; }; static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data); bool optInvertWhileLoop(BasicBlock* block); private: static bool optIterSmallOverflow(int iterAtExit, var_types incrType); static bool optIterSmallUnderflow(int iterAtExit, var_types decrType); bool optComputeLoopRep(int constInit, int constLimit, int iterInc, genTreeOps iterOper, var_types iterType, genTreeOps testOper, bool unsignedTest, bool dupCond, unsigned* iterCount); static fgWalkPreFn optIsVarAssgCB; protected: bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var); bool optIsVarAssgLoop(unsigned lnum, unsigned var); int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE); bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit); protected: // The following is the upper limit on how many expressions we'll keep track // of for the CSE analysis. // static const unsigned MAX_CSE_CNT = EXPSET_SZ; static const int MIN_CSE_COST = 2; // BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask. // This BitVec uses one bit per CSE candidate BitVecTraits* cseMaskTraits; // one bit per CSE candidate // BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm. // Two bits are allocated per CSE candidate to compute CSE availability // plus an extra bit to handle the initial unvisited case. // (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.) // // The two bits per CSE candidate have the following meanings: // 11 - The CSE is available, and is also available when considering calls as killing availability. // 10 - The CSE is available, but is not available when considering calls as killing availability. // 00 - The CSE is not available // 01 - An illegal combination // BitVecTraits* cseLivenessTraits; //----------------------------------------------------------------------------------------------------------------- // getCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index. // Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate // CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from // GET_CSE_INDEX(). // static unsigned genCSEnum2bit(unsigned CSEnum) { assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT)); return CSEnum - 1; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE. // static unsigned getCSEAvailBit(unsigned CSEnum) { return genCSEnum2bit(CSEnum) * 2; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit // for a CSE considering calls as killing availability bit (see description above). 
// static unsigned getCSEAvailCrossCallBit(unsigned CSEnum) { return getCSEAvailBit(CSEnum) + 1; } void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true); EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites /* Generic list of nodes - used by the CSE logic */ struct treeLst { treeLst* tlNext; GenTree* tlTree; }; struct treeStmtLst { treeStmtLst* tslNext; GenTree* tslTree; // tree node Statement* tslStmt; // statement containing the tree BasicBlock* tslBlock; // block containing the statement }; // The following logic keeps track of expressions via a simple hash table. struct CSEdsc { CSEdsc* csdNextInBucket; // used by the hash table size_t csdHashKey; // the orginal hashkey ssize_t csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def ValueNum csdConstDefVN; // When we CSE similar constants, this is the ValueNumber that we use for the LclVar // assignment unsigned csdIndex; // 1..optCSECandidateCount bool csdIsSharedConst; // true if this CSE is a shared const bool csdLiveAcrossCall; unsigned short csdDefCount; // definition count unsigned short csdUseCount; // use count (excluding the implicit uses at defs) weight_t csdDefWtCnt; // weighted def count weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs) GenTree* csdTree; // treenode containing the 1st occurrence Statement* csdStmt; // stmt containing the 1st occurrence BasicBlock* csdBlock; // block containing the 1st occurrence treeStmtLst* csdTreeList; // list of matching tree nodes: head treeStmtLst* csdTreeLast; // list of matching tree nodes: tail // ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing // and GT_IND nodes always have valid struct handle. // CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE bool csdStructHndMismatch; ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE. // This will be set to NoVN if we decide to abandon this CSE ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses. ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value // number, this will reflect it; otherwise, NoVN. // not used for shared const CSE's }; static const size_t s_optCSEhashSizeInitial; static const size_t s_optCSEhashGrowthFactor; static const size_t s_optCSEhashBucketSize; size_t optCSEhashSize; // The current size of hashtable size_t optCSEhashCount; // Number of entries in hashtable size_t optCSEhashMaxCountBeforeResize; // Number of entries before resize CSEdsc** optCSEhash; CSEdsc** optCSEtab; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap; NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be // re-numbered with the bound to improve range check elimination // Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found. 
void optCseUpdateCheckedBoundMap(GenTree* compare); void optCSEstop(); CSEdsc* optCSEfindDsc(unsigned index); bool optUnmarkCSE(GenTree* tree); // user defined callback data for the tree walk function optCSE_MaskHelper() struct optCSE_MaskData { EXPSET_TP CSE_defMask; EXPSET_TP CSE_useMask; }; // Treewalk helper for optCSE_DefMask and optCSE_UseMask static fgWalkPreFn optCSE_MaskHelper; // This function walks all the node for an given tree // and return the mask of CSE definitions and uses for the tree // void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData); // Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2. bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode); struct optCSEcostCmpEx { bool operator()(const CSEdsc* op1, const CSEdsc* op2); }; struct optCSEcostCmpSz { bool operator()(const CSEdsc* op1, const CSEdsc* op2); }; void optCleanupCSEs(); #ifdef DEBUG void optEnsureClearCSEInfo(); #endif // DEBUG static bool Is_Shared_Const_CSE(size_t key) { return ((key & TARGET_SIGN_BIT) != 0); } // returns the encoded key static size_t Encode_Shared_Const_CSE_Value(size_t key) { return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS); } // returns the orginal key static size_t Decode_Shared_Const_CSE_Value(size_t enckey) { assert(Is_Shared_Const_CSE(enckey)); return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS; } /************************************************************************** * Value Number based CSEs *************************************************************************/ // String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX(). #define FMT_CSE "CSE #%02u" public: void optOptimizeValnumCSEs(); protected: void optValnumCSE_Init(); unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt); bool optValnumCSE_Locate(); void optValnumCSE_InitDataFlow(); void optValnumCSE_DataFlow(); void optValnumCSE_Availablity(); void optValnumCSE_Heuristic(); bool optDoCSE; // True when we have found a duplicate CSE tree bool optValnumCSE_phase; // True when we are executing the optOptimizeValnumCSEs() phase unsigned optCSECandidateCount; // Count of CSE's candidates unsigned optCSEstart; // The first local variable number that is a CSE unsigned optCSEcount; // The total count of CSE's introduced. weight_t optCSEweight; // The weight of the current block when we are doing PerformCSE bool optIsCSEcandidate(GenTree* tree); // lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler // bool lclNumIsTrueCSE(unsigned lclNum) const { return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount)); } // lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop. // bool lclNumIsCSE(unsigned lclNum) const { return lvaGetDesc(lclNum)->lvIsCSE; } #ifdef DEBUG bool optConfigDisableCSE(); bool optConfigDisableCSE2(); #endif void optOptimizeCSEs(); struct isVarAssgDsc { GenTree* ivaSkip; ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars. #ifdef DEBUG void* ivaSelf; #endif unsigned ivaVar; // Variable we are interested in, or -1 varRefKinds ivaMaskInd; // What kind of indirect assignments are there? callInterf ivaMaskCall; // What kind of calls are there? bool ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to. 
}; static callInterf optCallInterf(GenTreeCall* call); public: // VN based copy propagation. // In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for. // While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor, // for locals which will use "definitions from uses", it will not be, so we store it // in this class instead. class CopyPropSsaDef { LclSsaVarDsc* m_ssaDef; #ifdef DEBUG GenTree* m_defNode; #endif public: CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode) : m_ssaDef(ssaDef) #ifdef DEBUG , m_defNode(defNode) #endif { } LclSsaVarDsc* GetSsaDef() const { return m_ssaDef; } #ifdef DEBUG GenTree* GetDefNode() const { return m_defNode; } #endif }; typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap; // Copy propagation functions. void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optCopyPropPushDef(GenTreeOp* asg, GenTreeLclVarCommon* lclNode, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode); int optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2); void optVnCopyProp(); INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName)); /************************************************************************** * Early value propagation *************************************************************************/ struct SSAName { unsigned m_lvNum; unsigned m_ssaNum; SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum) { } static unsigned GetHashCode(SSAName ssaNm) { return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum); } static bool Equals(SSAName ssaNm1, SSAName ssaNm2) { return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum); } }; #define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array #define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type. #define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores. #define OMF_HAS_NULLCHECK 0x00000008 // Method contains null check. #define OMF_HAS_FATPOINTER 0x00000010 // Method contains call, that needs fat pointer transformation. #define OMF_HAS_OBJSTACKALLOC 0x00000020 // Method contains an object allocated on the stack. #define OMF_HAS_GUARDEDDEVIRT 0x00000040 // Method contains guarded devirtualization candidate #define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary. #define OMF_HAS_PATCHPOINT 0x00000100 // Method contains patchpoints #define OMF_NEEDS_GCPOLLS 0x00000200 // Method needs GC polls #define OMF_HAS_FROZEN_STRING 0x00000400 // Method has a frozen string (REF constant int), currently only on CoreRT. 
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints #define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000 // Method has potential tail call in a non BBJ_RETURN block bool doesMethodHaveFatPointer() { return (optMethodFlags & OMF_HAS_FATPOINTER) != 0; } void setMethodHasFatPointer() { optMethodFlags |= OMF_HAS_FATPOINTER; } void clearMethodHasFatPointer() { optMethodFlags &= ~OMF_HAS_FATPOINTER; } void addFatPointerCandidate(GenTreeCall* call); bool doesMethodHaveFrozenString() const { return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0; } void setMethodHasFrozenString() { optMethodFlags |= OMF_HAS_FROZEN_STRING; } bool doesMethodHaveGuardedDevirtualization() const { return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0; } void setMethodHasGuardedDevirtualization() { optMethodFlags |= OMF_HAS_GUARDEDDEVIRT; } void clearMethodHasGuardedDevirtualization() { optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT; } void considerGuardedDevirtualization(GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)); void addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood); bool doesMethodHaveExpRuntimeLookup() { return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0; } void setMethodHasExpRuntimeLookup() { optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP; } void clearMethodHasExpRuntimeLookup() { optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP; } void addExpRuntimeLookupCandidate(GenTreeCall* call); bool doesMethodHavePatchpoints() { return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0; } void setMethodHasPatchpoint() { optMethodFlags |= OMF_HAS_PATCHPOINT; } bool doesMethodHavePartialCompilationPatchpoints() { return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0; } void setMethodHasPartialCompilationPatchpoint() { optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT; } unsigned optMethodFlags; bool doesMethodHaveNoReturnCalls() { return optNoReturnCallCount > 0; } void setMethodHasNoReturnCalls() { optNoReturnCallCount++; } unsigned optNoReturnCallCount; // Recursion bound controls how far we can go backwards tracking for a SSA value. // No throughput diff was found with backward walk bound between 3-8. 
static const int optEarlyPropRecurBound = 5; enum class optPropKind { OPK_INVALID, OPK_ARRAYLEN, OPK_NULLCHECK }; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap; GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind); GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optDoEarlyPropForBlock(BasicBlock* block); bool optDoEarlyPropForFunc(); void optEarlyProp(); void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optIsNullCheckFoldingLegal(GenTree* tree, GenTree* nullCheckTree, GenTree** nullCheckParent, Statement** nullCheckStmt); bool optCanMoveNullCheckPastTree(GenTree* tree, unsigned nullCheckLclNum, bool isInsideTry, bool checkSideEffectSummary); #if DEBUG void optCheckFlagsAreSet(unsigned methodFlag, const char* methodFlagStr, unsigned bbFlag, const char* bbFlagStr, GenTree* tree, BasicBlock* basicBlock); #endif // Redundant branch opts // PhaseStatus optRedundantBranches(); bool optRedundantRelop(BasicBlock* const block); bool optRedundantBranch(BasicBlock* const block); bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); /************************************************************************** * Value/Assertion propagation *************************************************************************/ public: // Data structures for assertion prop BitVecTraits* apTraits; ASSERT_TP apFull; enum optAssertionKind { OAK_INVALID, OAK_EQUAL, OAK_NOT_EQUAL, OAK_SUBRANGE, OAK_NO_THROW, OAK_COUNT }; enum optOp1Kind { O1K_INVALID, O1K_LCLVAR, O1K_ARR_BND, O1K_BOUND_OPER_BND, O1K_BOUND_LOOP_BND, O1K_CONSTANT_LOOP_BND, O1K_CONSTANT_LOOP_BND_UN, O1K_EXACT_TYPE, O1K_SUBTYPE, O1K_VALUE_NUMBER, O1K_COUNT }; enum optOp2Kind { O2K_INVALID, O2K_LCLVAR_COPY, O2K_IND_CNS_INT, O2K_CONST_INT, O2K_CONST_LONG, O2K_CONST_DOUBLE, O2K_ZEROOBJ, O2K_SUBRANGE, O2K_COUNT }; struct AssertionDsc { optAssertionKind assertionKind; struct SsaVar { unsigned lclNum; // assigned to or property of this local var number unsigned ssaNum; }; struct ArrBnd { ValueNum vnIdx; ValueNum vnLen; }; struct AssertionDscOp1 { optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype ValueNum vn; union { SsaVar lcl; ArrBnd bnd; }; } op1; struct AssertionDscOp2 { optOp2Kind kind; // a const or copy assignment ValueNum vn; struct IntVal { ssize_t iconVal; // integer #if !defined(HOST_64BIT) unsigned padding; // unused; ensures iconFlags does not overlap lconVal #endif GenTreeFlags iconFlags; // gtFlags }; union { struct { SsaVar lcl; FieldSeqNode* zeroOffsetFieldSeq; }; IntVal u1; __int64 lconVal; double dconVal; IntegralRange u2; }; } op2; bool IsCheckedBoundArithBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND); } bool IsCheckedBoundBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND); } bool IsConstantBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND)); } 
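    // For reference, an illustrative encoding of a simple local/constant assertion such as
    // "V03 == 7" (hypothetical local and constant, not taken from any particular method):
    //
    //   assertionKind = OAK_EQUAL
    //   op1.kind      = O1K_LCLVAR;    op1.lcl.lclNum = 3
    //   op2.kind      = O2K_CONST_INT; op2.u1.iconVal = 7
    //
    // IsConstantInt32Assertion() returns true for such an assertion, and its complement
    // ("V03 != 7") differs only in having assertionKind == OAK_NOT_EQUAL.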
bool IsConstantBoundUnsigned() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND_UN)); } bool IsBoundsCheckNoThrow() { return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND)); } bool IsCopyAssertion() { return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY)); } bool IsConstantInt32Assertion() { return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT); } static bool SameKind(AssertionDsc* a1, AssertionDsc* a2) { return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind && a1->op2.kind == a2->op2.kind; } static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2) { if (kind == OAK_EQUAL) { return kind2 == OAK_NOT_EQUAL; } else if (kind == OAK_NOT_EQUAL) { return kind2 == OAK_EQUAL; } return false; } bool HasSameOp1(AssertionDsc* that, bool vnBased) { if (op1.kind != that->op1.kind) { return false; } else if (op1.kind == O1K_ARR_BND) { assert(vnBased); return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen); } else { return ((vnBased && (op1.vn == that->op1.vn)) || (!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum))); } } bool HasSameOp2(AssertionDsc* that, bool vnBased) { if (op2.kind != that->op2.kind) { return false; } switch (op2.kind) { case O2K_IND_CNS_INT: case O2K_CONST_INT: return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags)); case O2K_CONST_LONG: return (op2.lconVal == that->op2.lconVal); case O2K_CONST_DOUBLE: // exact match because of positive and negative zero. return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0); case O2K_ZEROOBJ: return true; case O2K_LCLVAR_COPY: return (op2.lcl.lclNum == that->op2.lcl.lclNum) && (!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) && (op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq); case O2K_SUBRANGE: return op2.u2.Equals(that->op2.u2); case O2K_INVALID: // we will return false break; default: assert(!"Unexpected value for op2.kind in AssertionDsc."); break; } return false; } bool Complementary(AssertionDsc* that, bool vnBased) { return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } bool Equals(AssertionDsc* that, bool vnBased) { if (assertionKind != that->assertionKind) { return false; } else if (assertionKind == OAK_NO_THROW) { assert(op2.kind == O2K_INVALID); return HasSameOp1(that, vnBased); } else { return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } } }; protected: static fgWalkPreFn optAddCopiesCallback; static fgWalkPreFn optVNAssertionPropCurStmtVisitor; unsigned optAddCopyLclNum; GenTree* optAddCopyAsgnNode; bool optLocalAssertionProp; // indicates that we are performing local assertion prop bool optAssertionPropagated; // set to true if we modified the trees bool optAssertionPropagatedCurrentStmt; #ifdef DEBUG GenTree* optAssertionPropCurrentTree; #endif AssertionIndex* optComplementaryAssertionMap; JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions // using the value of a local var) for each local var AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments AssertionIndex optAssertionCount; // total number of assertions in the assertion table AssertionIndex optMaxAssertionCount; public: void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); 
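    // A minimal sketch of how a simple guard maps onto the AssertionDsc fields above
    // (the local number and the constant are hypothetical, for illustration only):
    //
    //     if (x == 3)   // "then" edge:
    //         assertionKind = OAK_EQUAL,      op1.kind = O1K_LCLVAR    (op1.lcl.lclNum = x's lclNum)
    //                                         op2.kind = O2K_CONST_INT (op2.u1.iconVal = 3)
    //     else          // "else" edge gets the complementary OAK_NOT_EQUAL assertion,
    //                   // linked via optMapComplementary / optFindComplementary.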
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test); GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree); GenTree* optExtractSideEffListFromConst(GenTree* tree); AssertionIndex GetAssertionCount() { return optAssertionCount; } ASSERT_TP* bbJtrueAssertionOut; typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap; ValueNumToAssertsMap* optValueNumToAsserts; // Assertion prop helpers. ASSERT_TP& GetAssertionDep(unsigned lclNum); AssertionDsc* optGetAssertion(AssertionIndex assertIndex); void optAssertionInit(bool isLocalProp); void optAssertionTraitsInit(AssertionIndex assertionCount); void optAssertionReset(AssertionIndex limit); void optAssertionRemove(AssertionIndex index); // Assertion prop data flow functions. void optAssertionPropMain(); Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt); bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags); ASSERT_TP* optInitAssertionDataflowFlags(); ASSERT_TP* optComputeAssertionGen(); // Assertion Gen functions. void optAssertionGen(GenTree* tree); AssertionIndex optAssertionGenCast(GenTreeCast* cast); AssertionIndex optAssertionGenPhiDefn(GenTree* tree); AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree); AssertionInfo optAssertionGenJtrue(GenTree* tree); AssertionIndex optCreateJtrueAssertions(GenTree* op1, GenTree* op2, Compiler::optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFindComplementary(AssertionIndex assertionIndex); void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index); // Assertion creation functions. AssertionIndex optCreateAssertion(GenTree* op1, GenTree* op2, optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion); bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange); void optCreateComplementaryAssertion(AssertionIndex assertionIndex, GenTree* op1, GenTree* op2, bool helperCallArgs = false); bool optAssertionVnInvolvesNan(AssertionDsc* assertion); AssertionIndex optAddAssertion(AssertionDsc* assertion); void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index); #ifdef DEBUG void optPrintVnAssertionMapping(); #endif ASSERT_TP optGetVnMappedAssertions(ValueNum vn); // Used for respective assertion propagations. AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)); bool optAssertionIsNonNull(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)); AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2); AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1); AssertionIndex optLocalAssertionIsEqualOrNotEqual( optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions); // Assertion prop for lcl var functions. 
bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc); GenTree* optCopyAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); GenTree* optConstantAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions); // Assertion propagation functions. GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block); GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt); GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt); GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt); GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt); GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt); GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt); GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call); // Implied assertion functions. 
void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions); void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions); void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result); void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result); #ifdef DEBUG void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0); void optPrintAssertionIndex(AssertionIndex index); void optPrintAssertionIndices(ASSERT_TP assertions); void optDebugCheckAssertion(AssertionDsc* assertion); void optDebugCheckAssertions(AssertionIndex AssertionIndex); #endif static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr); static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr); void optAddCopies(); /************************************************************************** * Range checks *************************************************************************/ public: struct LoopCloneVisitorInfo { LoopCloneContext* context; unsigned loopNum; Statement* stmt; LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt) : context(context), loopNum(loopNum), stmt(nullptr) { } }; bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum); bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context); static fgWalkPreFn optCanOptimizeByLoopCloningVisitor; fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info); bool optObtainLoopCloningOpts(LoopCloneContext* context); bool optIsLoopClonable(unsigned loopInd); bool optLoopCloningEnabled(); #ifdef DEBUG void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore); #endif void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath)); bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context); bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context); BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context, unsigned loopNum, BasicBlock* slowHead, BasicBlock* insertAfter); protected: ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk)); bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB); protected: bool optLoopsMarked; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX RegAlloc XX XX XX XX Does the register allocation and puts the remaining lclVars on the stack XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc); void raMarkStkVars(); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(TARGET_AMD64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); return (type == TYP_SIMD32); } #elif defined(TARGET_ARM64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); // ARM64 ABI FP Callee save registers only require Callee to save lower 8 Bytes // For SIMD types longer than 8 bytes Caller is responsible for saving and restoring 
Upper bytes. return ((type == TYP_SIMD16) || (type == TYP_SIMD12)); } #else // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #error("Unknown target architecture for FEATURE_SIMD") #endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE protected: // Some things are used by both LSRA and regpredict allocators. FrameType rpFrameType; bool rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason)); private: Lowering* m_pLowering; // Lowering; needed to Lower IR that's added or modified after Lowering. LinearScanInterface* m_pLinearScan; // Linear Scan allocator /* raIsVarargsStackArg is called by raMaskStkVars and by lvaComputeRefCounts. It identifies the special case where a varargs function has a parameter passed on the stack, other than the special varargs handle. Such parameters require special treatment, because they cannot be tracked by the GC (their offsets in the stack are not known at compile time). */ bool raIsVarargsStackArg(unsigned lclNum) { #ifdef TARGET_X86 LclVarDsc* varDsc = lvaGetDesc(lclNum); assert(varDsc->lvIsParam); return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg)); #else // TARGET_X86 return false; #endif // TARGET_X86 } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX EEInterface XX XX XX XX Get to the class and method info from the Execution Engine given XX XX tokens for the class and method XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Get handles void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedToken, CORINFO_CALLINFO_FLAGS flags, CORINFO_CALL_INFO* pResult); void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS flags, CORINFO_FIELD_INFO* pResult); // Get the flags bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd); bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn); bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd); var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr); #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS) const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className); const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd); unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle); bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method); CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method); #endif var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned); CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list); CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context); unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); static unsigned eeGetArgAlignment(var_types type, bool isFloatHfa); // VOM info, method sigs void eeGetSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, CORINFO_SIG_INFO* retSig); void eeGetCallSiteSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, 
CORINFO_SIG_INFO* retSig); void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr); // Method entry-points, instrs CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method); CORINFO_EE_INFO eeInfo; bool eeInfoInitialized; CORINFO_EE_INFO* eeGetEEInfo(); // Gets the offset of a SDArray's first element static unsigned eeGetArrayDataOffset(); // Get the offset of a MDArray's first element static unsigned eeGetMDArrayDataOffset(unsigned rank); // Get the offset of a MDArray's dimension length for a given dimension. static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension); // Get the offset of a MDArray's lower bound for a given dimension. static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension); GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig); // Returns the page size for the target machine as reported by the EE. target_size_t eeGetPageSize() { return (target_size_t)eeGetEEInfo()->osPageSize; } //------------------------------------------------------------------------ // VirtualStubParam: virtual stub dispatch extra parameter (slot address). // // It represents Abi and target specific registers for the parameter. // class VirtualStubParamInfo { public: VirtualStubParamInfo(bool isCoreRTABI) { #if defined(TARGET_X86) reg = REG_EAX; regMask = RBM_EAX; #elif defined(TARGET_AMD64) if (isCoreRTABI) { reg = REG_R10; regMask = RBM_R10; } else { reg = REG_R11; regMask = RBM_R11; } #elif defined(TARGET_ARM) if (isCoreRTABI) { reg = REG_R12; regMask = RBM_R12; } else { reg = REG_R4; regMask = RBM_R4; } #elif defined(TARGET_ARM64) reg = REG_R11; regMask = RBM_R11; #else #error Unsupported or unset target architecture #endif } regNumber GetReg() const { return reg; } _regMask_enum GetRegMask() const { return regMask; } private: regNumber reg; _regMask_enum regMask; }; VirtualStubParamInfo* virtualStubParamInfo; bool IsTargetAbi(CORINFO_RUNTIME_ABI abi) { return eeGetEEInfo()->targetAbi == abi; } bool generateCFIUnwindCodes() { #if defined(FEATURE_CFI_SUPPORT) return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI); #else return false; #endif } // Debugging support - Line number info void eeGetStmtOffsets(); unsigned eeBoundariesCount; ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE void eeSetLIcount(unsigned count); void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc); void eeSetLIdone(); #ifdef DEBUG static void eeDispILOffs(IL_OFFSET offs); static void eeDispSourceMappingOffs(uint32_t offs); static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line); void eeDispLineInfos(); #endif // DEBUG // Debugging support - Local var info void eeGetVars(); unsigned eeVarsCount; struct VarResultInfo { UNATIVE_OFFSET startOffset; UNATIVE_OFFSET endOffset; DWORD varNumber; CodeGenInterface::siVarLoc loc; } * eeVars; void eeSetLVcount(unsigned count); void eeSetLVinfo(unsigned which, UNATIVE_OFFSET startOffs, UNATIVE_OFFSET length, unsigned varNum, const CodeGenInterface::siVarLoc& loc); void eeSetLVdone(); #ifdef DEBUG void eeDispVar(ICorDebugInfo::NativeVarInfo* var); void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars); #endif // DEBUG // ICorJitInfo wrappers void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize); void eeAllocUnwindInfo(BYTE* pHotCode, BYTE* pColdCode, ULONG startOffset, ULONG endOffset, ULONG unwindSize, BYTE* pUnwindBlock, 
CorJitFuncKind funcKind); void eeSetEHcount(unsigned cEH); void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause); WORD eeGetRelocTypeHint(void* target); // ICorStaticInfo wrapper functions bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken); #if defined(UNIX_AMD64_ABI) #ifdef DEBUG static void dumpSystemVClassificationType(SystemVClassificationType ct); #endif // DEBUG void eeGetSystemVAmd64PassStructInRegisterDescriptor( /*IN*/ CORINFO_CLASS_HANDLE structHnd, /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr); #endif // UNIX_AMD64_ABI template <typename ParamType> bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithErrorTrapImp(void (*function)(void*), void* param); template <typename ParamType> bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param); // Utility functions const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr); #if defined(DEBUG) const WCHAR* eeGetCPString(size_t stringHandle); #endif const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd); static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper); static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method); static bool IsSharedStaticHelper(GenTree* tree); static bool IsGcSafePoint(GenTreeCall* call); static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs); // returns true/false if 'field' is a Jit Data offset static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field); // returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB) static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field); /*****************************************************************************/ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX CodeGenerator XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: CodeGenInterface* codeGen; // Record the instr offset mapping to the generated code jitstd::list<IPmappingDsc> genIPmappings; #ifdef DEBUG jitstd::list<PreciseIPMapping> genPreciseIPmappings; #endif // Managed RetVal - A side hash table meant to record the mapping from a // GT_CALL node to its debug info. This info is used to emit sequence points // that can be used by debugger to determine the native offset at which the // managed RetVal will be available. // // In fact we can store debug info in a GT_CALL node. This was ruled out in // favor of a side table for two reasons: 1) We need debug info for only those // GT_CALL nodes (created during importation) that correspond to an IL call and // whose return type is other than TYP_VOID. 2) GT_CALL node is a frequently used // structure and IL offset is needed only when generating debuggable code. Therefore // it is desirable to avoid memory size penalty in retail scenarios. 
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable; CallSiteDebugInfoTable* genCallSite2DebugInfoMap; unsigned genReturnLocal; // Local number for the return value when applicable. BasicBlock* genReturnBB; // jumped to when not optimizing for speed. // The following properties are part of CodeGenContext. Getters are provided here for // convenience and backward compatibility, but the properties can only be set by invoking // the setter on CodeGenContext directly. emitter* GetEmitter() const { return codeGen->GetEmitter(); } bool isFramePointerUsed() const { return codeGen->isFramePointerUsed(); } bool GetInterruptible() { return codeGen->GetInterruptible(); } void SetInterruptible(bool value) { codeGen->SetInterruptible(value); } #if DOUBLE_ALIGN const bool genDoubleAlign() { return codeGen->doDoubleAlign(); } DWORD getCanDoubleAlign(); bool shouldDoubleAlign(unsigned refCntStk, unsigned refCntReg, weight_t refCntWtdReg, unsigned refCntStkParam, weight_t refCntWtdStkDbl); #endif // DOUBLE_ALIGN bool IsFullPtrRegMapRequired() { return codeGen->IsFullPtrRegMapRequired(); } void SetFullPtrRegMapRequired(bool value) { codeGen->SetFullPtrRegMapRequired(value); } // Things that MAY belong either in CodeGen or CodeGenContext #if defined(FEATURE_EH_FUNCLETS) FuncInfoDsc* compFuncInfos; unsigned short compCurrFuncIdx; unsigned short compFuncInfoCount; unsigned short compFuncCount() { assert(fgFuncletsCreated); return compFuncInfoCount; } #else // !FEATURE_EH_FUNCLETS // This is a no-op when there are no funclets! void genUpdateCurrentFunclet(BasicBlock* block) { return; } FuncInfoDsc compFuncInfoRoot; static const unsigned compCurrFuncIdx = 0; unsigned short compFuncCount() { return 1; } #endif // !FEATURE_EH_FUNCLETS FuncInfoDsc* funCurrentFunc(); void funSetCurrentFunc(unsigned funcIdx); FuncInfoDsc* funGetFunc(unsigned funcIdx); unsigned int funGetFuncIdx(BasicBlock* block); // LIVENESS VARSET_TP compCurLife; // current live variables GenTree* compCurLifeTree; // node after which compCurLife has been computed // Compare the given "newLife" with last set of live variables and update // codeGen "gcInfo", siScopes, "regSet" with the new variable's homes/liveness. template <bool ForCodeGen> void compChangeLife(VARSET_VALARG_TP newLife); // Update the GC's masks, register's masks and reports change on variable's homes given a set of // current live variables if changes have happened since "compCurLife". template <bool ForCodeGen> inline void compUpdateLife(VARSET_VALARG_TP newLife); // Gets a register mask that represent the kill set for a helper call since // not all JIT Helper calls follow the standard ABI on the target architecture. regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper); #ifdef TARGET_ARM // Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at // "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the // struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" -- // i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and // a double, and we at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask. 
void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask); #endif // TARGET_ARM // If "tree" is a indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR // node, else NULL. static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree); // This map is indexed by GT_OBJ nodes that are address of promoted struct variables, which // have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this // table, one may assume that all the (tracked) field vars die at this GT_OBJ. Otherwise, // the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field // vars of the promoted struct local that go dead at the given node (the set bits are the bits // for the tracked var indices of the field vars, as in a live var set). // // The map is allocated on demand so all map operations should use one of the following three // wrapper methods. NodeToVarsetPtrMap* m_promotedStructDeathVars; NodeToVarsetPtrMap* GetPromotedStructDeathVars() { if (m_promotedStructDeathVars == nullptr) { m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator()); } return m_promotedStructDeathVars; } void ClearPromotedStructDeathVars() { if (m_promotedStructDeathVars != nullptr) { m_promotedStructDeathVars->RemoveAll(); } } bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits) { *bits = nullptr; bool result = false; if (m_promotedStructDeathVars != nullptr) { result = m_promotedStructDeathVars->Lookup(tree, bits); } return result; } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX UnwindInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #if !defined(__GNUC__) #pragma region Unwind information #endif public: // // Infrastructure functions: start/stop/reserve/emit. // void unwindBegProlog(); void unwindEndProlog(); void unwindBegEpilog(); void unwindEndEpilog(); void unwindReserve(); void unwindEmit(void* pHotCode, void* pColdCode); // // Specific unwind information functions: called by code generation to indicate a particular // prolog or epilog unwindable instruction has been generated. // void unwindPush(regNumber reg); void unwindAllocStack(unsigned size); void unwindSetFrameReg(regNumber reg, unsigned offset); void unwindSaveReg(regNumber reg, unsigned offset); #if defined(TARGET_ARM) void unwindPushMaskInt(regMaskTP mask); void unwindPushMaskFloat(regMaskTP mask); void unwindPopMaskInt(regMaskTP mask); void unwindPopMaskFloat(regMaskTP mask); void unwindBranch16(); // The epilog terminates with a 16-bit branch (e.g., "bx lr") void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only // called via unwindPadding(). void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last // instruction and the current location. #endif // TARGET_ARM #if defined(TARGET_ARM64) void unwindNop(); void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last // instruction and the current location. void unwindSaveReg(regNumber reg, int offset); // str reg, [sp, #offset] void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]! 
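    // A minimal sketch of how a typical ARM64 prolog pairs with these unwind notifications
    // (the 0x20 frame size and the exact register choices are illustrative only):
    //
    //     stp fp, lr, [sp, #-0x20]!   ->  unwindSaveRegPairPreindexed(REG_FP, REG_LR, -0x20);
    //     mov fp, sp                  ->  unwindSetFrameReg(REG_FPBASE, 0);
    //     ...
    //     ret lr                      ->  unwindReturn(REG_LR);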
void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset] void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]! void unwindSaveNext(); // unwind code: save_next void unwindReturn(regNumber reg); // ret lr #endif // defined(TARGET_ARM64) // // Private "helper" functions for the unwind implementation. // private: #if defined(FEATURE_EH_FUNCLETS) void unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc); #endif // FEATURE_EH_FUNCLETS void unwindReserveFunc(FuncInfoDsc* func); void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS)) void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode); void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode); #endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS) UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func); #if defined(TARGET_AMD64) void unwindBegPrologWindows(); void unwindPushWindows(regNumber reg); void unwindAllocStackWindows(unsigned size); void unwindSetFrameRegWindows(regNumber reg, unsigned offset); void unwindSaveRegWindows(regNumber reg, unsigned offset); #ifdef UNIX_AMD64_ABI void unwindSaveRegCFI(regNumber reg, unsigned offset); #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM) void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16); void unwindPushPopMaskFloat(regMaskTP mask); #endif // TARGET_ARM #if defined(FEATURE_CFI_SUPPORT) short mapRegNumToDwarfReg(regNumber reg); void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); void unwindPushPopCFI(regNumber reg); void unwindBegPrologCFI(); void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat); void unwindAllocStackCFI(unsigned size); void unwindSetFrameRegCFI(regNumber reg, unsigned offset); void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #ifdef DEBUG void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, DWORD cfiCodeBytes, const CFI_CODE* const pCfiCode); #endif #endif // FEATURE_CFI_SUPPORT #if !defined(__GNUC__) #pragma endregion // Note: region is NOT under !defined(__GNUC__) #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX SIMD XX XX XX XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX XX that contains the distinguished, well-known SIMD type definitions). 
XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ bool IsBaselineSimdIsaSupported() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compOpportunisticallyDependsOn(minimumIsa); #else return false; #endif } #if defined(DEBUG) bool IsBaselineSimdIsaSupportedDebugOnly() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compIsaSupportedDebugOnly(minimumIsa); #else return false; #endif // FEATURE_SIMD } #endif // DEBUG // Get highest available level for SIMD codegen SIMDLevel getSIMDSupportLevel() { #if defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX2)) { return SIMD_AVX2_Supported; } if (compOpportunisticallyDependsOn(InstructionSet_SSE42)) { return SIMD_SSE4_Supported; } // min bar is SSE2 return SIMD_SSE2_Supported; #else assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch"); unreached(); return SIMD_Not_Supported; #endif } bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd) { return info.compCompHnd->isIntrinsicType(clsHnd); } const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName) { return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName); } CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index) { return info.compCompHnd->getTypeInstantiationArgument(cls, index); } #ifdef FEATURE_SIMD // Should we support SIMD intrinsics? bool featureSIMD; // Should we recognize SIMD types? // We always do this on ARM64 to support HVA types. bool supportSIMDTypes() { #ifdef TARGET_ARM64 return true; #else return featureSIMD; #endif } // Have we identified any SIMD types? // This is currently used by struct promotion to avoid getting type information for a struct // field to see if it is a SIMD type, if we haven't seen any SIMD types or operations in // the method. bool _usesSIMDTypes; bool usesSIMDTypes() { return _usesSIMDTypes; } void setUsesSIMDTypes(bool value) { _usesSIMDTypes = value; } // This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics // that require indexed access to the individual fields of the vector, which is not well supported // by the hardware. It is allocated when/if such situations are encountered during Lowering. 
unsigned lvaSIMDInitTempVarNum; struct SIMDHandlesCache { // SIMD Types CORINFO_CLASS_HANDLE SIMDFloatHandle; CORINFO_CLASS_HANDLE SIMDDoubleHandle; CORINFO_CLASS_HANDLE SIMDIntHandle; CORINFO_CLASS_HANDLE SIMDUShortHandle; CORINFO_CLASS_HANDLE SIMDUByteHandle; CORINFO_CLASS_HANDLE SIMDShortHandle; CORINFO_CLASS_HANDLE SIMDByteHandle; CORINFO_CLASS_HANDLE SIMDLongHandle; CORINFO_CLASS_HANDLE SIMDUIntHandle; CORINFO_CLASS_HANDLE SIMDULongHandle; CORINFO_CLASS_HANDLE SIMDNIntHandle; CORINFO_CLASS_HANDLE SIMDNUIntHandle; CORINFO_CLASS_HANDLE SIMDVector2Handle; CORINFO_CLASS_HANDLE SIMDVector3Handle; CORINFO_CLASS_HANDLE SIMDVector4Handle; CORINFO_CLASS_HANDLE SIMDVectorHandle; #ifdef FEATURE_HW_INTRINSICS #if defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector64FloatHandle; CORINFO_CLASS_HANDLE Vector64DoubleHandle; CORINFO_CLASS_HANDLE Vector64IntHandle; CORINFO_CLASS_HANDLE Vector64UShortHandle; CORINFO_CLASS_HANDLE Vector64UByteHandle; CORINFO_CLASS_HANDLE Vector64ShortHandle; CORINFO_CLASS_HANDLE Vector64ByteHandle; CORINFO_CLASS_HANDLE Vector64LongHandle; CORINFO_CLASS_HANDLE Vector64UIntHandle; CORINFO_CLASS_HANDLE Vector64ULongHandle; CORINFO_CLASS_HANDLE Vector64NIntHandle; CORINFO_CLASS_HANDLE Vector64NUIntHandle; #endif // defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector128FloatHandle; CORINFO_CLASS_HANDLE Vector128DoubleHandle; CORINFO_CLASS_HANDLE Vector128IntHandle; CORINFO_CLASS_HANDLE Vector128UShortHandle; CORINFO_CLASS_HANDLE Vector128UByteHandle; CORINFO_CLASS_HANDLE Vector128ShortHandle; CORINFO_CLASS_HANDLE Vector128ByteHandle; CORINFO_CLASS_HANDLE Vector128LongHandle; CORINFO_CLASS_HANDLE Vector128UIntHandle; CORINFO_CLASS_HANDLE Vector128ULongHandle; CORINFO_CLASS_HANDLE Vector128NIntHandle; CORINFO_CLASS_HANDLE Vector128NUIntHandle; #if defined(TARGET_XARCH) CORINFO_CLASS_HANDLE Vector256FloatHandle; CORINFO_CLASS_HANDLE Vector256DoubleHandle; CORINFO_CLASS_HANDLE Vector256IntHandle; CORINFO_CLASS_HANDLE Vector256UShortHandle; CORINFO_CLASS_HANDLE Vector256UByteHandle; CORINFO_CLASS_HANDLE Vector256ShortHandle; CORINFO_CLASS_HANDLE Vector256ByteHandle; CORINFO_CLASS_HANDLE Vector256LongHandle; CORINFO_CLASS_HANDLE Vector256UIntHandle; CORINFO_CLASS_HANDLE Vector256ULongHandle; CORINFO_CLASS_HANDLE Vector256NIntHandle; CORINFO_CLASS_HANDLE Vector256NUIntHandle; #endif // defined(TARGET_XARCH) #endif // FEATURE_HW_INTRINSICS SIMDHandlesCache() { memset(this, 0, sizeof(*this)); } }; SIMDHandlesCache* m_simdHandleCache; // Get an appropriate "zero" for the given type and class handle. GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle); // Get the handle for a SIMD type. CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType) { if (m_simdHandleCache == nullptr) { // This may happen if the JIT generates SIMD node on its own, without importing them. // Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache. 
return NO_CLASS_HANDLE; } if (simdBaseJitType == CORINFO_TYPE_FLOAT) { switch (simdType) { case TYP_SIMD8: return m_simdHandleCache->SIMDVector2Handle; case TYP_SIMD12: return m_simdHandleCache->SIMDVector3Handle; case TYP_SIMD16: if ((getSIMDVectorType() == TYP_SIMD32) || (m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE)) { return m_simdHandleCache->SIMDVector4Handle; } break; case TYP_SIMD32: break; default: unreached(); } } assert(emitTypeSize(simdType) <= largestEnregisterableStructSize()); switch (simdBaseJitType) { case CORINFO_TYPE_FLOAT: return m_simdHandleCache->SIMDFloatHandle; case CORINFO_TYPE_DOUBLE: return m_simdHandleCache->SIMDDoubleHandle; case CORINFO_TYPE_INT: return m_simdHandleCache->SIMDIntHandle; case CORINFO_TYPE_USHORT: return m_simdHandleCache->SIMDUShortHandle; case CORINFO_TYPE_UBYTE: return m_simdHandleCache->SIMDUByteHandle; case CORINFO_TYPE_SHORT: return m_simdHandleCache->SIMDShortHandle; case CORINFO_TYPE_BYTE: return m_simdHandleCache->SIMDByteHandle; case CORINFO_TYPE_LONG: return m_simdHandleCache->SIMDLongHandle; case CORINFO_TYPE_UINT: return m_simdHandleCache->SIMDUIntHandle; case CORINFO_TYPE_ULONG: return m_simdHandleCache->SIMDULongHandle; case CORINFO_TYPE_NATIVEINT: return m_simdHandleCache->SIMDNIntHandle; case CORINFO_TYPE_NATIVEUINT: return m_simdHandleCache->SIMDNUIntHandle; default: assert(!"Didn't find a class handle for simdType"); } return NO_CLASS_HANDLE; } // Returns true if this is a SIMD type that should be considered an opaque // vector type (i.e. do not analyze or promote its fields). // Note that all but the fixed vector types are opaque, even though they may // actually be declared as having fields. bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const { return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) && (structHandle != m_simdHandleCache->SIMDVector3Handle) && (structHandle != m_simdHandleCache->SIMDVector4Handle)); } // Returns true if the tree corresponds to a TYP_SIMD lcl var. // Note that both SIMD vector args and locals are mared as lvSIMDType = true, but // type of an arg node is TYP_BYREF and a local node is TYP_SIMD or TYP_STRUCT. bool isSIMDTypeLocal(GenTree* tree) { return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType; } // Returns true if the lclVar is an opaque SIMD type. bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const { if (!varDsc->lvSIMDType) { return false; } return isOpaqueSIMDType(varDsc->GetStructHnd()); } static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId) { return (intrinsicId == SIMDIntrinsicEqual); } // Returns base JIT type of a TYP_SIMD local. // Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD. 
CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree) { if (isSIMDTypeLocal(tree)) { return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType(); } return CORINFO_TYPE_UNDEF; } bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Numerics") == 0; } return false; } bool isSIMDClass(typeInfo* pTypeInfo) { return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass()); } bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { #ifdef FEATURE_HW_INTRINSICS if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0; } #endif // FEATURE_HW_INTRINSICS return false; } bool isHWSIMDClass(typeInfo* pTypeInfo) { #ifdef FEATURE_HW_INTRINSICS return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass()); #else return false; #endif } bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd); } bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo) { return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo); } // Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF // if it is not a SIMD type or is an unsupported base JIT type. CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr); CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd) { return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr); } // Get SIMD Intrinsic info given the method handle. // Also sets typeHnd, argCount, baseType and sizeBytes out params. const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd, CORINFO_METHOD_HANDLE methodHnd, CORINFO_SIG_INFO* sig, bool isNewObj, unsigned* argCount, CorInfoType* simdBaseJitType, unsigned* sizeBytes); // Pops and returns GenTree node from importers type stack. // Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes. GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr); // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain given relop result. SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId, CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, CorInfoType* inOutBaseJitType, GenTree** op1, GenTree** op2); #if defined(TARGET_XARCH) // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain == comparison result. SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, GenTree** op1, GenTree** op2); #endif // defined(TARGET_XARCH) void setLclRelatedToSIMDIntrinsic(GenTree* tree); bool areFieldsContiguous(GenTree* op1, GenTree* op2); bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second); bool areArrayElementsContiguous(GenTree* op1, GenTree* op2); bool areArgumentsContiguous(GenTree* op1, GenTree* op2); GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize); // check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT. 
GenTree* impSIMDIntrinsic(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef); GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd); // Whether SIMD vector occupies part of SIMD register. // SSE2: vector2f/3f are considered sub register SIMD types. // AVX: vector2f, 3f and 4f are all considered sub register SIMD types. bool isSubRegisterSIMDType(GenTreeSIMD* simdNode) { unsigned vectorRegisterByteLength; #if defined(TARGET_XARCH) // Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded // with the AOT compiler, so that it cannot change from aot compilation time to runtime // This api does not require such fixing as it merely pertains to the size of the simd type // relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here // does not preclude the code from being used on a machine with a larger vector length.) if (getSIMDSupportLevel() < SIMD_AVX2_Supported) { vectorRegisterByteLength = 16; } else { vectorRegisterByteLength = 32; } #else vectorRegisterByteLength = getSIMDVectorRegisterByteLength(); #endif return (simdNode->GetSimdSize() < vectorRegisterByteLength); } // Get the type for the hardware SIMD vector. // This is the maximum SIMD type supported for this target. var_types getSIMDVectorType() { #if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return TYP_SIMD32; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return TYP_SIMD16; } #elif defined(TARGET_ARM64) return TYP_SIMD16; #else assert(!"getSIMDVectorType() unimplemented on target arch"); unreached(); #endif } // Get the size of the SIMD type in bytes int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd) { unsigned sizeBytes = 0; (void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes); return sizeBytes; } // Get the the number of elements of baseType of SIMD vector given by its size and baseType static int getSIMDVectorLength(unsigned simdSize, var_types baseType); // Get the the number of elements of baseType of SIMD vector given by its type handle int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd); // Get preferred alignment of SIMD type. int getSIMDTypeAlignment(var_types simdType); // Get the number of bytes in a System.Numeric.Vector<T> for the current compilation. // Note - cannot be used for System.Runtime.Intrinsic unsigned getSIMDVectorRegisterByteLength() { #if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return YMM_REGSIZE_BYTES; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return XMM_REGSIZE_BYTES; } #elif defined(TARGET_ARM64) return FP_REGSIZE_BYTES; #else assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch"); unreached(); #endif } // The minimum and maximum possible number of bytes in a SIMD vector. 
// maxSIMDStructBytes // The minimum SIMD size supported by System.Numeric.Vectors or System.Runtime.Intrinsic // SSE: 16-byte Vector<T> and Vector128<T> // AVX: 32-byte Vector256<T> (Vector<T> is 16-byte) // AVX2: 32-byte Vector<T> and Vector256<T> unsigned int maxSIMDStructBytes() { #if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX)) { return YMM_REGSIZE_BYTES; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return XMM_REGSIZE_BYTES; } #else return getSIMDVectorRegisterByteLength(); #endif } unsigned int minSIMDStructBytes() { return emitTypeSize(TYP_SIMD8); } public: // Returns the codegen type for a given SIMD size. static var_types getSIMDTypeForSize(unsigned size) { var_types simdType = TYP_UNDEF; if (size == 8) { simdType = TYP_SIMD8; } else if (size == 12) { simdType = TYP_SIMD12; } else if (size == 16) { simdType = TYP_SIMD16; } else if (size == 32) { simdType = TYP_SIMD32; } else { noway_assert(!"Unexpected size for SIMD type"); } return simdType; } private: unsigned getSIMDInitTempVarNum(var_types simdType); #else // !FEATURE_SIMD bool isOpaqueSIMDLclVar(LclVarDsc* varDsc) { return false; } #endif // FEATURE_SIMD public: //------------------------------------------------------------------------ // largestEnregisterableStruct: The size in bytes of the largest struct that can be enregistered. // // Notes: It is not guaranteed that the struct of this size or smaller WILL be a // candidate for enregistration. unsigned largestEnregisterableStructSize() { #ifdef FEATURE_SIMD #if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) if (opts.IsReadyToRun()) { // Return constant instead of maxSIMDStructBytes, as maxSIMDStructBytes performs // checks that are effected by the current level of instruction set support would // otherwise cause the highest level of instruction set support to be reported to crossgen2. // and this api is only ever used as an optimization or assert, so no reporting should // ever happen. return YMM_REGSIZE_BYTES; } #endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) unsigned vectorRegSize = maxSIMDStructBytes(); assert(vectorRegSize >= TARGET_POINTER_SIZE); return vectorRegSize; #else // !FEATURE_SIMD return TARGET_POINTER_SIZE; #endif // !FEATURE_SIMD } // Use to determine if a struct *might* be a SIMD type. As this function only takes a size, many // structs will fit the criteria. bool structSizeMightRepresentSIMDType(size_t structSize) { #ifdef FEATURE_SIMD // Do not use maxSIMDStructBytes as that api in R2R on X86 and X64 may notify the JIT // about the size of a struct under the assumption that the struct size needs to be recorded. // By using largestEnregisterableStructSize here, the detail of whether or not Vector256<T> is // enregistered or not will not be messaged to the R2R compiler. return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize()); #else return false; #endif // FEATURE_SIMD } #ifdef FEATURE_SIMD static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId); #endif // !FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID); #endif // FEATURE_HW_INTRINSICS private: // These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType() // is defined for both FEATURE_SIMD and !FEATURE_SIMD apropriately. 
The use // of this routines also avoids the need of #ifdef FEATURE_SIMD specific code. // Is this var is of type simd struct? bool lclVarIsSIMDType(unsigned varNum) { return lvaGetDesc(varNum)->lvIsSIMDType(); } // Is this Local node a SIMD local? bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree) { return lclVarIsSIMDType(lclVarTree->GetLclNum()); } // Returns true if the TYP_SIMD locals on stack are aligned at their // preferred byte boundary specified by getSIMDTypeAlignment(). // // As per the Intel manual, the preferred alignment for AVX vectors is // 32-bytes. It is not clear whether additional stack space used in // aligning stack is worth the benefit and for now will use 16-byte // alignment for AVX 256-bit vectors with unaligned load/stores to/from // memory. On x86, the stack frame is aligned to 4 bytes. We need to extend // existing support for double (8-byte) alignment to 16 or 32 byte // alignment for frames with local SIMD vars, if that is determined to be // profitable. // // On Amd64 and SysV, RSP+8 is aligned on entry to the function (before // prolog has run). This means that in RBP-based frames RBP will be 16-byte // aligned. For RSP-based frames these are only sometimes aligned, depending // on the frame size. // bool isSIMDTypeLocalAligned(unsigned varNum) { #if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF) { // TODO-Cleanup: Can't this use the lvExactSize on the varDsc? int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType); if (alignment <= STACK_ALIGN) { bool rbpBased; int off = lvaFrameAddress(varNum, &rbpBased); // On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the // first instruction of a function. If our frame is RBP based // then RBP will always be 16 bytes aligned, so we can simply // check the offset. if (rbpBased) { return (off % alignment) == 0; } // For RSP-based frame the alignment of RSP depends on our // locals. rsp+8 is aligned on entry and we just subtract frame // size so it is not hard to compute. Note that the compiler // tries hard to make sure the frame size means RSP will be // 16-byte aligned, but for leaf functions without locals (i.e. // frameSize = 0) it will not be. int frameSize = codeGen->genTotalFrameSize(); return ((8 - frameSize + off) % alignment) == 0; } } #endif // FEATURE_SIMD return false; } #ifdef DEBUG // Answer the question: Is a particular ISA supported? // Use this api when asking the question so that future // ISA questions can be asked correctly or when asserting // support/nonsupport for an instruction set bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const { #if defined(TARGET_XARCH) || defined(TARGET_ARM64) return (opts.compSupportsISA & (1ULL << isa)) != 0; #else return false; #endif } #endif // DEBUG bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const; // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations? 
    // The result of this api call will exactly match the target machine
    // on which the function is executed (except for CoreLib, where there are special rules)
    bool compExactlyDependsOn(CORINFO_InstructionSet isa) const
    {
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
        uint64_t isaBit = (1ULL << isa);
        if ((opts.compSupportsISAReported & isaBit) == 0)
        {
            if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0))
                ((Compiler*)this)->opts.compSupportsISAExactly |= isaBit;
            ((Compiler*)this)->opts.compSupportsISAReported |= isaBit;
        }
        return (opts.compSupportsISAExactly & isaBit) != 0;
#else
        return false;
#endif
    }

    // Ensure that code will not execute if an instruction set is usable. Call only
    // if the instruction set has previously been reported as unusable, but when
    // that status has not yet been recorded to the AOT compiler.
    void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa)
    {
        // Use compExactlyDependsOn to capture and record the use of the isa.
        bool isaUsable = compExactlyDependsOn(isa);
        // Assert that the ISA is unusable. If it is usable, this function should never be called.
        assert(!isaUsable);
    }

    // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
    // The result of this api call will match the target machine if the result is true.
    // If the result is false, then the target machine may still have support for the instruction.
    bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const
    {
        if ((opts.compSupportsISA & (1ULL << isa)) != 0)
        {
            return compExactlyDependsOn(isa);
        }
        else
        {
            return false;
        }
    }

    // Answer the question: Is a particular ISA supported for explicit hardware intrinsics?
    bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const
    {
        // Report intent to use the ISA to the EE
        compExactlyDependsOn(isa);
        return ((opts.compSupportsISA & (1ULL << isa)) != 0);
    }

    bool canUseVexEncoding() const
    {
#ifdef TARGET_XARCH
        return compOpportunisticallyDependsOn(InstructionSet_AVX);
#else
        return false;
#endif
    }

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Compiler                                        XX
XX                                                                           XX
XX   Generic info about the compilation and the method being compiled.       XX
XX   It is responsible for driving the other phases.                         XX
XX   It is also responsible for all the memory management.                   XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

public:
    Compiler* InlineeCompiler; // The Compiler instance for the inlinee

    InlineResult* compInlineResult; // The result of importing the inlinee method.

    bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE
    bool compJmpOpUsed;            // Does the method do a JMP
    bool compLongUsed;             // Does the method use TYP_LONG
    bool compFloatingPointUsed;    // Does the method use TYP_FLOAT or TYP_DOUBLE
    bool compTailCallUsed;         // Does the method do a tailcall
    bool compTailPrefixSeen;       // Does the method IL have tail. prefix
    bool compLocallocSeen;         // Does the method IL have localloc opcode
    bool compLocallocUsed;         // Does the method use localloc.
    bool compLocallocOptimized;    // Does the method have an optimized localloc
    bool compQmarkUsed;            // Does the method use GT_QMARK/GT_COLON
    bool compQmarkRationalized;    // Is it allowed to use a GT_QMARK/GT_COLON node.
bool compUnsafeCastUsed; // Does the method use LDIND/STIND to cast between scalar/refernce types bool compHasBackwardJump; // Does the method (or some inlinee) have a lexically backwards jump? bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler? bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set // NOTE: These values are only reliable after // the importing is completely finished. #ifdef DEBUG // State information - which phases have completed? // These are kept together for easy discoverability bool bRangeAllowStress; bool compCodeGenDone; int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done? size_t compSizeEstimate; // The estimated size of the method as per `gtSetEvalOrder`. size_t compCycleEstimate; // The estimated cycle count of the method as per `gtSetEvalOrder` #endif // DEBUG bool fgLocalVarLivenessDone; // Note that this one is used outside of debug. bool fgLocalVarLivenessChanged; bool compLSRADone; bool compRationalIRForm; bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method. bool compGeneratingProlog; bool compGeneratingEpilog; bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack. // Insert cookie on frame and code to check the cookie, like VC++ -GS. bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local // copies of susceptible parameters to avoid buffer overrun attacks through locals/params bool getNeedsGSSecurityCookie() const { return compNeedsGSSecurityCookie; } void setNeedsGSSecurityCookie() { compNeedsGSSecurityCookie = true; } FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During // frame layout calculations, this is the level we are currently // computing. //---------------------------- JITing options ----------------------------- enum codeOptimize { BLENDED_CODE, SMALL_CODE, FAST_CODE, COUNT_OPT_CODE }; struct Options { JitFlags* jitFlags; // all flags passed from the EE // The instruction sets that the compiler is allowed to emit. uint64_t compSupportsISA; // The instruction sets that were reported to the VM as being used by the current method. Subset of // compSupportsISA. uint64_t compSupportsISAReported; // The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations. // Subset of compSupportsISA. // The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only // used via explicit hardware intrinsics. uint64_t compSupportsISAExactly; void setSupportedISAs(CORINFO_InstructionSetFlags isas) { compSupportsISA = isas.GetFlagsRaw(); } unsigned compFlags; // method attributes unsigned instrCount; unsigned lvRefCount; codeOptimize compCodeOpt; // what type of code optimizations bool compUseCMOV; // optimize maximally and/or favor speed over size? 
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000 #define DEFAULT_MIN_OPTS_INSTR_COUNT 20000 #define DEFAULT_MIN_OPTS_BB_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000 // Maximun number of locals before turning off the inlining #define MAX_LV_NUM_COUNT_FOR_INLINING 512 bool compMinOpts; bool compMinOptsIsSet; #ifdef DEBUG mutable bool compMinOptsIsUsed; bool MinOpts() const { assert(compMinOptsIsSet); compMinOptsIsUsed = true; return compMinOpts; } bool IsMinOptsSet() const { return compMinOptsIsSet; } #else // !DEBUG bool MinOpts() const { return compMinOpts; } bool IsMinOptsSet() const { return compMinOptsIsSet; } #endif // !DEBUG bool OptimizationDisabled() const { return MinOpts() || compDbgCode; } bool OptimizationEnabled() const { return !OptimizationDisabled(); } void SetMinOpts(bool val) { assert(!compMinOptsIsUsed); assert(!compMinOptsIsSet || (compMinOpts == val)); compMinOpts = val; compMinOptsIsSet = true; } // true if the CLFLG_* for an optimization is set. bool OptEnabled(unsigned optFlag) const { return !!(compFlags & optFlag); } #ifdef FEATURE_READYTORUN bool IsReadyToRun() const { return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN); } #else bool IsReadyToRun() const { return false; } #endif // Check if the compilation is control-flow guard enabled. bool IsCFGEnabled() const { #if defined(TARGET_ARM64) || defined(TARGET_AMD64) // On these platforms we assume the register that the target is // passed in is preserved by the validator and take care to get the // target from the register for the call (even in debug mode). static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0); if (JitConfig.JitForceControlFlowGuard()) return true; return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG); #else // The remaining platforms are not supported and would require some // work to support. // // ARM32: // The ARM32 validator does not preserve any volatile registers // which means we have to take special care to allocate and use a // callee-saved register (reloading the target from memory is a // security issue). // // x86: // On x86 some VSD calls disassemble the call site and expect an // indirect call which is fundamentally incompatible with CFG. // This would require a different way to pass this information // through. // return false; #endif } #ifdef FEATURE_ON_STACK_REPLACEMENT bool IsOSR() const { return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR); } #else bool IsOSR() const { return false; } #endif // true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating // PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as // the current logic for frame setup initializes and pushes // the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot // safely be pushed/popped while the thread is in a preemptive state.). bool ShouldUsePInvokeHelpers() { return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) || jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE); } // true if we should use insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method // prolog/epilog bool IsReversePInvoke() { return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE); } bool compScopeInfo; // Generate the LocalVar info ? bool compDbgCode; // Generate debugger-friendly code? bool compDbgInfo; // Gather debugging info? 
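// Compile with Edit-and-Continue (EnC) support?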
bool compDbgEnC; #ifdef PROFILING_SUPPORTED bool compNoPInvokeInlineCB; #else static const bool compNoPInvokeInlineCB; #endif #ifdef DEBUG bool compGcChecks; // Check arguments and return values to ensure they are sane #endif #if defined(DEBUG) && defined(TARGET_XARCH) bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct. #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86. #endif // defined(DEBUG) && defined(TARGET_X86) bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen #ifdef DEBUG #if defined(TARGET_XARCH) bool compEnablePCRelAddr; // Whether absolute addr be encoded as PC-rel offset by RyuJIT where possible #endif #endif // DEBUG #ifdef UNIX_AMD64_ABI // This flag is indicating if there is a need to align the frame. // On AMD64-Windows, if there are calls, 4 slots for the outgoing ars are allocated, except for // FastTailCall. This slots makes the frame size non-zero, so alignment logic will be called. // On AMD64-Unix, there are no such slots. There is a possibility to have calls in the method with frame size of // 0. The frame alignment logic won't kick in. This flags takes care of the AMD64-Unix case by remembering that // there are calls and making sure the frame alignment logic is executed. bool compNeedToAlignFrame; #endif // UNIX_AMD64_ABI bool compProcedureSplitting; // Separate cold code from hot code bool genFPorder; // Preserve FP order (operations are non-commutative) bool genFPopt; // Can we do frame-pointer-omission optimization? bool altJit; // True if we are an altjit and are compiling this method #ifdef OPT_CONFIG bool optRepeat; // Repeat optimizer phases k times #endif #ifdef DEBUG bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH bool dspCode; // Display native code generated bool dspEHTable; // Display the EH table reported to the VM bool dspDebugInfo; // Display the Debug info reported to the VM bool dspInstrs; // Display the IL instructions intermixed with the native code output bool dspLines; // Display source-code lines intermixed with native code output bool dmpHex; // Display raw bytes in hex of native code output bool varNames; // Display variables names in native code output bool disAsm; // Display native code as it is generated bool disAsmSpilled; // Display native code when any register spilling occurs bool disasmWithGC; // Display GC info interleaved with disassembly. bool disDiffable; // Makes the Disassembly code 'diff-able' bool disAddr; // Display process address next to each instruction in disassembly code bool disAlignment; // Display alignment boundaries in disassembly code bool disAsm2; // Display native code after it is generated using external disassembler bool dspOrder; // Display names of each of the methods that we ngen/jit bool dspUnwind; // Display the unwind info output bool dspDiffable; // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable) bool compLongAddress; // Force using large pseudo instructions for long address // (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC) bool dspGCtbls; // Display the GC tables #endif bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method // Default numbers used to perform loop alignment. All the numbers are choosen // based on experimenting with various benchmarks. 
// Default minimum loop block weight required to enable loop alignment. #define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4 // By default a loop will be aligned at 32B address boundary to get better // performance as per architecture manuals. #define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20 // For non-adaptive loop alignment, by default, only align a loop whose size is // at most 3 times the alignment block size. If the loop is bigger than that, it is most // likely complicated enough that loop alignment will not impact performance. #define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3 #ifdef DEBUG // Loop alignment variables // If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary. bool compJitAlignLoopForJcc; #endif // For non-adaptive alignment, minimum loop size (in bytes) for which alignment will be done. unsigned short compJitAlignLoopMaxCodeSize; // Minimum weight needed for the first block of a loop to make it a candidate for alignment. unsigned short compJitAlignLoopMinBlockWeight; // For non-adaptive alignment, address boundary (power of 2) at which loop alignment should // be done. By default, 32B. unsigned short compJitAlignLoopBoundary; // Padding limit to align a loop. unsigned short compJitAlignPaddingLimit; // If set, perform adaptive loop alignment that limits number of padding based on loop size. bool compJitAlignLoopAdaptive; // If set, tries to hide alignment instructions behind unconditional jumps. bool compJitHideAlignBehindJmp; #ifdef LATE_DISASM bool doLateDisasm; // Run the late disassembler #endif // LATE_DISASM #if DUMP_GC_TABLES && !defined(DEBUG) #pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!") static const bool dspGCtbls = true; #endif #ifdef PROFILING_SUPPORTED // Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()). // This option helps make the JIT behave as if it is running under a profiler. bool compJitELTHookEnabled; #endif // PROFILING_SUPPORTED #if FEATURE_TAILCALL_OPT // Whether opportunistic or implicit tail call optimization is enabled. bool compTailCallOpt; // Whether optimization of transforming a recursive tail call into a loop is enabled. bool compTailCallLoopOpt; #endif #if FEATURE_FASTTAILCALL // Whether fast tail calls are allowed. bool compFastTailCalls; #endif // FEATURE_FASTTAILCALL #if defined(TARGET_ARM64) // Decision about whether to save FP/LR registers with callee-saved registers (see // COMPlus_JitSaveFpLrWithCalleSavedRegisters). int compJitSaveFpLrWithCalleeSavedRegisters; #endif // defined(TARGET_ARM64) #ifdef CONFIGURABLE_ARM_ABI bool compUseSoftFP = false; #else #ifdef ARM_SOFTFP static const bool compUseSoftFP = true; #else // !ARM_SOFTFP static const bool compUseSoftFP = false; #endif // ARM_SOFTFP #endif // CONFIGURABLE_ARM_ABI } opts; static bool s_pAltJitExcludeAssembliesListInitialized; static AssemblyNamesList2* s_pAltJitExcludeAssembliesList; #ifdef DEBUG static bool s_pJitDisasmIncludeAssembliesListInitialized; static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList; static bool s_pJitFunctionFileInitialized; static MethodSet* s_pJitMethodSet; #endif // DEBUG #ifdef DEBUG // silence warning of cast to greater size. It is easier to silence than construct code the compiler is happy with, and // it is safe in this case #pragma warning(push) #pragma warning(disable : 4312) template <typename T> T dspPtr(T p) { return (p == ZERO) ? ZERO : (opts.dspDiffable ? 
T(0xD1FFAB1E) : p); } template <typename T> T dspOffset(T o) { return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o); } #pragma warning(pop) static int dspTreeID(GenTree* tree) { return tree->gtTreeID; } static void printStmtID(Statement* stmt) { assert(stmt != nullptr); printf(FMT_STMT, stmt->GetID()); } static void printTreeID(GenTree* tree) { if (tree == nullptr) { printf("[------]"); } else { printf("[%06d]", dspTreeID(tree)); } } const char* pgoSourceToString(ICorJitInfo::PgoSource p); const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail); #endif // DEBUG // clang-format off #define STRESS_MODES \ \ STRESS_MODE(NONE) \ \ /* "Variations" stress areas which we try to mix up with each other. */ \ /* These should not be exhaustively used as they might */ \ /* hide/trivialize other areas */ \ \ STRESS_MODE(REGS) \ STRESS_MODE(DBL_ALN) \ STRESS_MODE(LCL_FLDS) \ STRESS_MODE(UNROLL_LOOPS) \ STRESS_MODE(MAKE_CSE) \ STRESS_MODE(LEGACY_INLINE) \ STRESS_MODE(CLONE_EXPR) \ STRESS_MODE(USE_CMOV) \ STRESS_MODE(FOLD) \ STRESS_MODE(MERGED_RETURNS) \ STRESS_MODE(BB_PROFILE) \ STRESS_MODE(OPT_BOOLS_GC) \ STRESS_MODE(REMORPH_TREES) \ STRESS_MODE(64RSLT_MUL) \ STRESS_MODE(DO_WHILE_LOOPS) \ STRESS_MODE(MIN_OPTS) \ STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \ STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \ STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \ STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \ STRESS_MODE(UNSAFE_BUFFER_CHECKS) \ STRESS_MODE(NULL_OBJECT_CHECK) \ STRESS_MODE(PINVOKE_RESTORE_ESP) \ STRESS_MODE(RANDOM_INLINE) \ STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \ STRESS_MODE(GENERIC_VARN) \ STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \ STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \ STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \ STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \ \ /* After COUNT_VARN, stress level 2 does all of these all the time */ \ \ STRESS_MODE(COUNT_VARN) \ \ /* "Check" stress areas that can be exhaustively used if we */ \ /* dont care about performance at all */ \ \ STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \ STRESS_MODE(CHK_FLOW_UPDATE) \ STRESS_MODE(EMITTER) \ STRESS_MODE(CHK_REIMPORT) \ STRESS_MODE(FLATFP) \ STRESS_MODE(GENERIC_CHECK) \ STRESS_MODE(COUNT) enum compStressArea { #define STRESS_MODE(mode) STRESS_##mode, STRESS_MODES #undef STRESS_MODE }; // clang-format on #ifdef DEBUG static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1]; BYTE compActiveStressModes[STRESS_COUNT]; #endif // DEBUG #define MAX_STRESS_WEIGHT 100 bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); #ifdef DEBUG bool compInlineStress() { return compStressCompile(STRESS_LEGACY_INLINE, 50); } bool compRandomInlineStress() { return compStressCompile(STRESS_RANDOM_INLINE, 50); } bool compPromoteFewerStructs(unsigned lclNum); #endif // DEBUG bool compTailCallStress() { #ifdef DEBUG // Do not stress tailcalls in IL stubs as the runtime creates several IL // stubs to implement the tailcall mechanism, which would then // recursively create more IL stubs. 
return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && (JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5)); #else return false; #endif } const char* compGetTieringName(bool wantShortName = false) const; const char* compGetStressMessage() const; codeOptimize compCodeOpt() const { #if 0 // Switching between size & speed has measurable throughput impact // (3.5% on NGen CoreLib when measured). It used to be enabled for // DEBUG, but should generate identical code between CHK & RET builds, // so that's not acceptable. // TODO-Throughput: Figure out what to do about size vs. speed & throughput. // Investigate the cause of the throughput regression. return opts.compCodeOpt; #else return BLENDED_CODE; #endif } //--------------------- Info about the procedure -------------------------- struct Info { COMP_HANDLE compCompHnd; CORINFO_MODULE_HANDLE compScopeHnd; CORINFO_CLASS_HANDLE compClassHnd; CORINFO_METHOD_HANDLE compMethodHnd; CORINFO_METHOD_INFO* compMethodInfo; bool hasCircularClassConstraints; bool hasCircularMethodConstraints; #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS const char* compMethodName; const char* compClassName; const char* compFullName; double compPerfScore; int compMethodSuperPMIIndex; // useful when debugging under SuperPMI #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) // Method hash is logically const, but computed // on first demand. mutable unsigned compMethodHashPrivate; unsigned compMethodHash() const; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef PSEUDORANDOM_NOP_INSERTION // things for pseudorandom nop insertion unsigned compChecksum; CLRRandom compRNG; #endif // The following holds the FLG_xxxx flags for the method we're compiling. unsigned compFlags; // The following holds the class attributes for the method we're compiling. unsigned compClassAttr; const BYTE* compCode; IL_OFFSET compILCodeSize; // The IL code size IL_OFFSET compILImportSize; // Estimated amount of IL actually imported IL_OFFSET compILEntry; // The IL entry point (normally 0) PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr) UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if: // (1) the code is not hot/cold split, and we issued less code than we expected, or // (2) the code is hot/cold split, and we issued less code than we expected // in the cold section (the hot section will always be padded out to compTotalHotCodeSize). bool compIsStatic : 1; // Is the method static (no 'this' pointer)? bool compIsVarArgs : 1; // Does the method have varargs parameters? bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used. var_types compRetType; // Return type of the method as declared in IL var_types compRetNativeType; // Normalized return type as per target arch ABI unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden) unsigned compArgsCount; // Number of arguments (incl. 
implicit and hidden) #if FEATURE_FASTTAILCALL unsigned compArgStackSize; // Incoming argument stack size in bytes #endif // FEATURE_FASTTAILCALL unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present); int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE) unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var) unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden) unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden) unsigned compMaxStack; UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition. CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method. unsigned compLvFrameListRoot; // lclNum for the Frame root unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL. // You should generally use compHndBBtabCount instead: it is the // current number of EH clauses (after additions like synchronized // methods and funclets, and removals like unreachable code deletion). Target::ArgOrder compArgOrder; bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler // and the VM expects that, or the JIT is a "self-host" compiler // (e.g., x86 hosted targeting x86) and the VM expects that. /* The following holds IL scope information about local variables. */ unsigned compVarScopesCount; VarScopeDsc* compVarScopes; /* The following holds information about instr offsets for * which we need to report IP-mappings */ IL_OFFSET* compStmtOffsets; // sorted unsigned compStmtOffsetsCount; ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit; #define CPU_X86 0x0100 // The generic X86 CPU #define CPU_X86_PENTIUM_4 0x0110 #define CPU_X64 0x0200 // The generic x64 CPU #define CPU_AMD_X64 0x0210 // AMD x64 CPU #define CPU_INTEL_X64 0x0240 // Intel x64 CPU #define CPU_ARM 0x0300 // The generic ARM CPU #define CPU_ARM64 0x0400 // The generic ARM64 CPU unsigned genCPU; // What CPU are we running on // Number of class profile probes in this method unsigned compClassProbeCount; } info; // Returns true if the method being compiled returns a non-void and non-struct value. // Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a // single register as per target arch ABI (e.g on Amd64 Windows structs of size 1, 2, // 4 or 8 gets normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; On Arm HFA structs). // Methods returning such structs are considered to return non-struct return value and // this method returns true in that case. bool compMethodReturnsNativeScalarType() { return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType); } // Returns true if the method being compiled returns RetBuf addr as its return value bool compMethodReturnsRetBufAddr() { // There are cases where implicit RetBuf argument should be explicitly returned in a register. // In such cases the return type is changed to TYP_BYREF and appropriate IR is generated. // These cases are: CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 // 1. on x64 Windows and Unix the address of RetBuf needs to be returned by // methods with hidden RetBufArg in RAX. 
In such case GT_RETURN is of TYP_BYREF, // returning the address of RetBuf. return (info.compRetBuffArg != BAD_VAR_NUM); #else // TARGET_AMD64 #ifdef PROFILING_SUPPORTED // 2. Profiler Leave callback expects the address of retbuf as return value for // methods with hidden RetBuf argument. impReturnInstruction() when profiler // callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for // methods with hidden RetBufArg. if (compIsProfilerHookNeeded()) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif // 3. Windows ARM64 native instance calling convention requires the address of RetBuff // to be returned in x0. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { auto callConv = info.compCallConv; if (callConvIsInstanceMethodCallConv(callConv)) { return (info.compRetBuffArg != BAD_VAR_NUM); } } #endif // TARGET_ARM64 // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (info.compCallConv != CorInfoCallConvExtension::Managed) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif return false; #endif // TARGET_AMD64 } // Returns true if the method returns a value in more than one return register // TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs? // TODO-ARM64: Does this apply for ARM64 too? bool compMethodReturnsMultiRegRetType() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. // Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } bool compEnregLocals() { return ((opts.compFlags & CLFLG_REGVAR) != 0); } bool compEnregStructLocals() { return (JitConfig.JitEnregStructLocals() != 0); } bool compObjectStackAllocation() { return (JitConfig.JitObjectStackAllocation() != 0); } // Returns true if the method returns a value in more than one return register, // it should replace/be merged with compMethodReturnsMultiRegRetType when #36868 is fixed. // The difference from original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling, // this method correctly returns false for it (it is passed as HVA), when the original returns true. bool compMethodReturnsMultiRegRegTypeAlternate() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 #if defined(TARGET_ARM64) // TYP_SIMD* are returned in one register. if (varTypeIsSIMD(info.compRetNativeType)) { return false; } #endif // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. 
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } // Returns true if the method being compiled returns a value bool compMethodHasRetVal() { return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() || compMethodReturnsMultiRegRetType(); } // Returns true if the method requires a PInvoke prolog and epilog bool compMethodRequiresPInvokeFrame() { return (info.compUnmanagedCallCountWithGCTransition > 0); } // Returns true if address-exposed user variables should be poisoned with a recognizable value bool compShouldPoisonFrame() { #ifdef FEATURE_ON_STACK_REPLACEMENT if (opts.IsOSR()) return false; #endif return !info.compInitMem && opts.compDbgCode; } // Returns true if the jit supports having patchpoints in this method. // Optionally, get the reason why not. bool compCanHavePatchpoints(const char** reason = nullptr); #if defined(DEBUG) void compDispLocalVars(); #endif // DEBUG private: class ClassLayoutTable* m_classLayoutTable; class ClassLayoutTable* typCreateClassLayoutTable(); class ClassLayoutTable* typGetClassLayoutTable(); public: // Get the layout having the specified layout number. ClassLayout* typGetLayoutByNum(unsigned layoutNum); // Get the layout number of the specified layout. unsigned typGetLayoutNum(ClassLayout* layout); // Get the layout having the specified size but no class handle. ClassLayout* typGetBlkLayout(unsigned blockSize); // Get the number of a layout having the specified size but no class handle. unsigned typGetBlkLayoutNum(unsigned blockSize); // Get the layout for the specified class handle. ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle); // Get the number of a layout for the specified class handle. unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle); //-------------------------- Global Compiler Data ------------------------------------ #ifdef DEBUG private: static LONG s_compMethodsCount; // to produce unique label names #endif public: #ifdef DEBUG LONG compMethodID; unsigned compGenTreeID; unsigned compStatementID; unsigned compBasicBlockID; #endif BasicBlock* compCurBB; // the current basic block in process Statement* compCurStmt; // the current statement in process GenTree* compCurTree; // the current tree in process // The following is used to create the 'method JIT info' block. 
size_t compInfoBlkSize; BYTE* compInfoBlkAddr; EHblkDsc* compHndBBtab; // array of EH data unsigned compHndBBtabCount; // element count of used elements in EH data array unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array #if defined(TARGET_X86) //------------------------------------------------------------------------- // Tracking of region covered by the monitor in synchronized methods void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT #endif // !TARGET_X86 Phases mostRecentlyActivePhase; // the most recently active phase PhaseChecks activePhaseChecks; // the currently active phase checks //------------------------------------------------------------------------- // The following keeps track of how many bytes of local frame space we've // grabbed so far in the current function, and how many argument bytes we // need to pop when we return. // unsigned compLclFrameSize; // secObject+lclBlk+locals+temps // Count of callee-saved regs we pushed in the prolog. // Does not include EBP for isFramePointerUsed() and double-aligned frames. // In case of Amd64 this doesn't include float regs saved on stack. unsigned compCalleeRegsPushed; #if defined(TARGET_XARCH) // Mask of callee saved float regs on stack. regMaskTP compCalleeFPRegsSavedMask; #endif #ifdef TARGET_AMD64 // Quirk for VS debug-launch scenario to work: // Bytes of padding between save-reg area and locals. #define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES) unsigned compVSQuirkStackPaddingNeeded; #endif unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg)) unsigned compMapILargNum(unsigned ILargNum); // map accounting for hidden args unsigned compMapILvarNum(unsigned ILvarNum); // map accounting for hidden args unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args #if defined(TARGET_ARM64) struct FrameInfo { // Frame type (1-5) int frameType; // Distance from established (method body) SP to base of callee save area int calleeSaveSpOffset; // Amount to subtract from SP before saving (prolog) OR // to add to SP after restoring (epilog) callee saves int calleeSaveSpDelta; // Distance from established SP to where caller's FP was saved int offsetSpToSavedFp; } compFrameInfo; #endif //------------------------------------------------------------------------- static void compStartup(); // One-time initialization static void compShutdown(); // One-time finalization void compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo); void compDone(); static void compDisplayStaticSizes(FILE* fout); //------------ Some utility functions -------------- void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection); /* OUT */ // Several JIT/EE interface functions return a CorInfoType, and also return a // class handle as an out parameter if the type is a value class. Returns the // size of the type these describe. unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd); // Returns true if the method being compiled has a return buffer. bool compHasRetBuffArg(); #ifdef DEBUG // Components used by the compiler may write unit test suites, and // have them run within this method. They will be run only once per process, and only // in debug. (Perhaps should be under the control of a COMPlus_ flag.) 
// These should fail by asserting. void compDoComponentUnitTestsOnce(); #endif // DEBUG int compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); void compCompileFinish(); int compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlag); ArenaAllocator* compGetArenaAllocator(); void generatePatchpointInfo(); #if MEASURE_MEM_ALLOC static bool s_dspMemStats; // Display per-phase memory statistics for every function #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS unsigned m_loopsConsidered; bool m_curLoopHasHoistedExpression; unsigned m_loopsWithHoistedExpressions; unsigned m_totalHoistedExpressions; void AddLoopHoistStats(); void PrintPerMethodLoopHoistStats(); static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below. static unsigned s_loopsConsidered; static unsigned s_loopsWithHoistedExpressions; static unsigned s_totalHoistedExpressions; static void PrintAggregateLoopHoistStats(FILE* f); #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS class EnregisterStats { private: unsigned m_totalNumberOfVars; unsigned m_totalNumberOfStructVars; unsigned m_totalNumberOfEnregVars; unsigned m_totalNumberOfStructEnregVars; unsigned m_addrExposed; unsigned m_VMNeedsStackAddr; unsigned m_localField; unsigned m_blockOp; unsigned m_dontEnregStructs; unsigned m_notRegSizeStruct; unsigned m_structArg; unsigned m_lclAddrNode; unsigned m_castTakesAddr; unsigned m_storeBlkSrc; unsigned m_oneAsgRetyping; unsigned m_swizzleArg; unsigned m_blockOpRet; unsigned m_returnSpCheck; unsigned m_simdUserForcesDep; unsigned m_liveInOutHndlr; unsigned m_depField; unsigned m_noRegVars; unsigned m_minOptsGC; #ifdef JIT32_GCENCODER unsigned m_PinningRef; #endif // JIT32_GCENCODER #if !defined(TARGET_64BIT) unsigned m_longParamField; #endif // !TARGET_64BIT unsigned m_parentExposed; unsigned m_tooConservative; unsigned m_escapeAddress; unsigned m_osrExposed; unsigned m_stressLclFld; unsigned m_copyFldByFld; unsigned m_dispatchRetBuf; unsigned m_wideIndir; public: void RecordLocal(const LclVarDsc* varDsc); void Dump(FILE* fout) const; }; static EnregisterStats s_enregisterStats; #endif // TRACK_ENREG_STATS bool compIsForImportOnly(); bool compIsForInlining() const; bool compDonotInline(); #ifdef DEBUG // Get the default fill char value we randomize this value when JitStress is enabled. 
static unsigned char compGetJitDefaultFill(Compiler* comp); const char* compLocalVarName(unsigned varNum, unsigned offs); VarName compVarName(regNumber reg, bool isFloatReg = false); const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false); const char* compRegNameForSize(regNumber reg, size_t size); const char* compFPregVarName(unsigned fpReg, bool displayVar = false); void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP); void compDspSrcLinesByLineNum(unsigned line, bool seek = false); #endif // DEBUG //------------------------------------------------------------------------- struct VarScopeListNode { VarScopeDsc* data; VarScopeListNode* next; static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc) { VarScopeListNode* node = new (alloc) VarScopeListNode; node->data = value; node->next = nullptr; return node; } }; struct VarScopeMapInfo { VarScopeListNode* head; VarScopeListNode* tail; static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc) { VarScopeMapInfo* info = new (alloc) VarScopeMapInfo; info->head = node; info->tail = node; return info; } }; // Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup. static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap; // Map to keep variables' scope indexed by varNum containing it's scope dscs at the index. VarNumToScopeDscMap* compVarScopeMap; VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd); VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs); VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs); void compInitVarScopeMap(); VarScopeDsc** compEnterScopeList; // List has the offsets where variables // enter scope, sorted by instr offset unsigned compNextEnterScope; VarScopeDsc** compExitScopeList; // List has the offsets where variables // go out of scope, sorted by instr offset unsigned compNextExitScope; void compInitScopeLists(); void compResetScopeLists(); VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false); VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false); void compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)); #ifdef DEBUG void compDispScopeLists(); #endif // DEBUG bool compIsProfilerHookNeeded(); //------------------------------------------------------------------------- /* Statistical Data Gathering */ void compJitStats(); // call this function and enable // various ifdef's below for statistical data #if CALL_ARG_STATS void compCallArgStats(); static void compDispCallArgStats(FILE* fout); #endif //------------------------------------------------------------------------- protected: #ifdef DEBUG bool skipMethod(); #endif ArenaAllocator* compArenaAllocator; public: void compFunctionTraceStart(); void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI); protected: size_t compMaxUncheckedOffsetForNullObject; void compInitOptions(JitFlags* compileFlags); void compSetProcessor(); void compInitDebuggingInfo(); void compSetOptimizationLevel(); #ifdef TARGET_ARMARCH bool compRsvdRegCheck(FrameLayoutState curState); #endif void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); // Clear annotations produced during optimizations; 
to be used between iterations when repeating opts. void ResetOptAnnotations(); // Regenerate loop descriptors; to be used between iterations when repeating opts. void RecomputeLoopInfo(); #ifdef PROFILING_SUPPORTED // Data required for generating profiler Enter/Leave/TailCall hooks bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method void* compProfilerMethHnd; // Profiler handle of the method being compiled. Passed as param to ELT callbacks bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle #endif public: // Assumes called as part of process shutdown; does any compiler-specific work associated with that. static void ProcessShutdownWork(ICorStaticInfo* statInfo); CompAllocator getAllocator(CompMemKind cmk = CMK_Generic) { return CompAllocator(compArenaAllocator, cmk); } CompAllocator getAllocatorGC() { return getAllocator(CMK_GC); } CompAllocator getAllocatorLoopHoist() { return getAllocator(CMK_LoopHoist); } #ifdef DEBUG CompAllocator getAllocatorDebugOnly() { return getAllocator(CMK_DebugOnly); } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX typeInfo XX XX XX XX Checks for type compatibility and merges types XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Returns true if child is equal to or a subtype of parent for merge purposes // This support is necessary to suport attributes that are not described in // for example, signatures. For example, the permanent home byref (byref that // points to the gc heap), isn't a property of method signatures, therefore, // it is safe to have mismatches here (that tiCompatibleWith will not flag), // but when deciding if we need to reimport a block, we need to take these // in account bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Returns true if child is equal to or a subtype of parent. // normalisedForStack indicates that both types are normalised for the stack bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Merges pDest and pSrc. Returns false if merge is undefined. // *pDest is modified to represent the merged type. Sets "*changed" to true // if this changes "*pDest". bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX IL verification stuff XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // The following is used to track liveness of local variables, initialization // of valueclass constructors, and type safe use of IL instructions. // dynamic state info needed for verification EntryState verCurrentState; // this ptr of object type .ctors are considered intited only after // the base class ctor is called, or an alternate ctor is called. // An uninited this ptr can be used to access fields, but cannot // be used to call a member function. 
bool verTrackObjCtorInitState; void verInitBBEntryState(BasicBlock* block, EntryState* currentState); // Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state. void verSetThisInit(BasicBlock* block, ThisInitState tis); void verInitCurrentState(); void verResetCurrentState(BasicBlock* block, EntryState* currentState); // Merges the current verification state into the entry state of "block", return false if that merge fails, // TRUE if it succeeds. Further sets "*changed" to true if this changes the entry state of "block". bool verMergeEntryStates(BasicBlock* block, bool* changed); void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)); void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)); typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef = false); // converts from jit type representation to typeInfo typeInfo verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo bool verIsSDArray(const typeInfo& ti); typeInfo verGetArrayElemType(const typeInfo& ti); typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args); bool verIsByRefLike(const typeInfo& ti); bool verIsSafeToReturnByRef(const typeInfo& ti); // generic type variables range over types that satisfy IsBoxable bool verIsBoxable(const typeInfo& ti); void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line)); void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line)); bool verCheckTailCallConstraint(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call // on a type parameter? bool speculative // If true, won't throw if verificatoin fails. Instead it will // return false to the caller. // If false, it will throw. ); bool verIsBoxedValueType(const typeInfo& ti); void verVerifyCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, bool tailCall, bool readonlyCall, // is this a "readonly." call? const BYTE* delegateCreateStart, const BYTE* codeAddr, CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName)); bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef); typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType); typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType); void verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, bool mutator, bool allowPlainStructAsThis = false); void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode); void verVerifyThisPtrInitialised(); bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target); #ifdef DEBUG // One line log function. Default level is 0. Increasing it gives you // more log information // levels are currently unused: #define JITDUMP(level,...) 
(); void JitLogEE(unsigned level, const char* fmt, ...); bool compDebugBreak; bool compJitHaltMethod(); #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GS Security checks for unsafe buffers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: struct ShadowParamVarInfo { FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other unsigned shadowCopy; // Lcl var num, if not valid set to BAD_VAR_NUM static bool mayNeedShadowCopy(LclVarDsc* varDsc) { #if defined(TARGET_AMD64) // GS cookie logic to create shadow slots, create trees to copy reg args to shadow // slots and update all trees to refer to shadow slots is done immediately after // fgMorph(). LSRA could potentially mark a param as DoNotEnregister after JIT determines // not to shadow a parameter. Also, LSRA could potentially spill a param which is passed // in register. Therefore, conservatively all params may need a shadow copy. Note that // GS cookie logic further checks whether the param is a ptr or an unsafe buffer before // creating a shadow slot even though this routine returns true. // // TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than // required. There are two cases under which a reg arg could potentially be used from its // home location: // a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates()) // b) LSRA spills it // // Possible solution to address case (a) // - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked // in this routine. Note that live out of exception handler is something we may not be // able to do here since GS cookie logic is invoked ahead of liveness computation. // Therefore, for methods with exception handling that need a GS cookie check we might have // to take the conservative approach. // // Possible solution to address case (b) // - Whenever a parameter passed in an argument register needs to be spilled by LSRA, we // create a new spill temp if the method needs a GS cookie check. return varDsc->lvIsParam; #else // !defined(TARGET_AMD64) return varDsc->lvIsParam && !varDsc->lvIsRegArg; #endif } #ifdef DEBUG void Print() { printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy); } #endif }; GSCookie* gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL ShadowParamVarInfo* gsShadowVarInfo; // Table used by shadow param analysis code void gsGSChecksInitCookie(); // Grabs cookie variable void gsCopyShadowParams(); // Identify vulnerable params and create shadow copies bool gsFindVulnerableParams(); // Shadow param analysis code void gsParamsToShadows(); // Insert copy code and replace param uses by shadow static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk #define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined. // This can be overwritten by setting complus_JITInlineSize env variable.
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined #define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers private: #ifdef FEATURE_JIT_METHOD_PERF JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation. static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run. static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD. static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to. #endif void BeginPhase(Phases phase); // Indicate the start of the given phase. void EndPhase(Phases phase); // Indicate the end of the given phase. #if MEASURE_CLRAPI_CALLS // Thin wrappers that call into JitTimer (if present). inline void CLRApiCallEnter(unsigned apix); inline void CLRApiCallLeave(unsigned apix); public: inline void CLR_API_Enter(API_ICorJitInfo_Names ename); inline void CLR_API_Leave(API_ICorJitInfo_Names ename); private: #endif #if defined(DEBUG) || defined(INLINE_DATA) // These variables are associated with maintaining SQM data about compile time. unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase // in the current compilation. unsigned __int64 m_compCycles; // Net cycle count for current compilation DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of // the inlining phase in the current compilation. #endif // defined(DEBUG) || defined(INLINE_DATA) // Records the SQM-relevant (cycles and tick count). Should be called after inlining is complete. // (We do this after inlining because this marks the last point at which the JIT is likely to cause // type-loading and class initialization). void RecordStateAtEndOfInlining(); // Assumes being called at the end of compilation. Update the SQM state. void RecordStateAtEndOfCompilation(); public: #if FUNC_INFO_LOGGING static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the // filename to write it to. static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to. #endif // FUNC_INFO_LOGGING Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers. #if MEASURE_NOWAY void RecordNowayAssert(const char* filename, unsigned line, const char* condStr); #endif // MEASURE_NOWAY #ifndef FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(); #else // FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(const char* filename, unsigned line); // Telemetry instance to use per method compilation. JitTelemetry compJitTelemetry; // Get common parameters that have to be logged with most telemetry data. void compGetTelemetryDefaults(const char** assemblyName, const char** scopeName, const char** methodName, unsigned* methodHash); #endif // !FEATURE_TRACELOGGING #ifdef DEBUG private: NodeToTestDataMap* m_nodeTestData; static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000; unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we // label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS. // Current kept in this. 
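// DEBUG-only node test data: GetNodeTestData() below lazily creates, on the inline root, the map from GenTree nodes to test annotations that the JitTestCheck* methods consume.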
public: NodeToTestDataMap* GetNodeTestData() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_nodeTestData == nullptr) { compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly()); } return compRoot->m_nodeTestData; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap; // Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and // currently occur in the AST graph. NodeToIntMap* FindReachableNodesInNodeTestData(); // Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated // test data, associate that data with "to". void TransferTestDataToNode(GenTree* from, GenTree* to); // These are the methods that test that the various conditions implied by the // test attributes are satisfied. void JitTestCheckSSA(); // SSA builder tests. void JitTestCheckVN(); // Value numbering tests. #endif // DEBUG // The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for // operations. FieldSeqStore* m_fieldSeqStore; FieldSeqStore* GetFieldSeqStore() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_fieldSeqStore == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_FieldSeqStore)); compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc); } return compRoot->m_fieldSeqStore; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap; // Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since // the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant // that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to // attach the field sequence directly to the address node. NodeToFieldSeqMap* m_zeroOffsetFieldMap; NodeToFieldSeqMap* GetZeroOffsetFieldMap() { // Don't need to worry about inlining here if (m_zeroOffsetFieldMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for // allocation. CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap)); m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc); } return m_zeroOffsetFieldMap; } // Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in // "fieldSeq", whose offsets are required all to be zero. Ensures that any field sequence annotation currently on // "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has // a field sequence as a member; otherwise, it may be the addition of an a byref and a constant, where the const // has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we // record the the field sequence using the ZeroOffsetFieldMap described above. // // One exception above is that "op1" is a node of type "TYP_REF" where "op1" is a GT_LCL_VAR. // This happens when System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in // CoreRT. Such case is handled same as the default case. 
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq); typedef JitHashTable<const GenTree*, JitPtrKeyFuncs<GenTree>, ArrayInfo> NodeToArrayInfoMap; NodeToArrayInfoMap* m_arrayInfoMap; NodeToArrayInfoMap* GetArrayInfoMap() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_arrayInfoMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc); } return compRoot->m_arrayInfoMap; } //----------------------------------------------------------------------------------------------------------------- // Compiler::TryGetArrayInfo: // Given an indirection node, checks to see whether or not that indirection represents an array access, and // if so returns information about the array. // // Arguments: // indir - The `GT_IND` node. // arrayInfo (out) - Information about the accessed array if this function returns true. Undefined otherwise. // // Returns: // True if the `GT_IND` node represents an array access; false otherwise. bool TryGetArrayInfo(GenTreeIndir* indir, ArrayInfo* arrayInfo) { if ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0) { return false; } if (indir->gtOp1->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = indir->gtOp1->AsIndexAddr(); *arrayInfo = ArrayInfo(indexAddr->gtElemType, indexAddr->gtElemSize, indexAddr->gtElemOffset, indexAddr->gtStructElemClass); return true; } bool found = GetArrayInfoMap()->Lookup(indir, arrayInfo); assert(found); return true; } NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount]; // In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory // states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory // state, all the possible memory states are possible initial states of the corresponding catch block(s).) NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind) { if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates) { // Use the same map for GCHeap and ByrefExposed when their states match. memoryKind = ByrefExposed; } assert(memoryKind < MemoryKindCount); Compiler* compRoot = impInlineRoot(); if (compRoot->m_memorySsaMap[memoryKind] == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc); } return compRoot->m_memorySsaMap[memoryKind]; } // The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields. 
CORINFO_CLASS_HANDLE m_refAnyClass; CORINFO_FIELD_HANDLE GetRefanyDataField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 0); } CORINFO_FIELD_HANDLE GetRefanyTypeField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 1); } #if VARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_varsetOpCounter; #endif #if ALLVARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter; #endif static HelperCallProperties s_helperCallProperties; #ifdef UNIX_AMD64_ABI static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size); static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum); static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); #endif // defined(UNIX_AMD64_ABI) void fgMorphMultiregStructArgs(GenTreeCall* call); GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr); bool killGCRefs(GenTree* tree); }; // end of class Compiler //--------------------------------------------------------------------------------------------------------------------- // GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern. // // This class implements a configurable walker for IR trees. There are five configuration options (defaults values are // shown in parentheses): // // - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit // of a misnomer, as the first entry will always be the current node. // // - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an // argument before visiting the node's operands. // // - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an // argument after visiting the node's operands. // // - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes. // `DoPreOrder` must be true if this option is true. // // - UseExecutionOrder (false): when true, then walker will visit a node's operands in execution order (e.g. if a // binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be // visited before the first). // // At least one of `DoPreOrder` and `DoPostOrder` must be specified. 
// // A simple pre-order visitor might look something like the following: // // class CountingVisitor final : public GenTreeVisitor<CountingVisitor> // { // public: // enum // { // DoPreOrder = true // }; // // unsigned m_count; // // CountingVisitor(Compiler* compiler) // : GenTreeVisitor<CountingVisitor>(compiler), m_count(0) // { // } // // Compiler::fgWalkResult PreOrderVisit(GenTree* node) // { // m_count++; // } // }; // // This visitor would then be used like so: // // CountingVisitor countingVisitor(compiler); // countingVisitor.WalkTree(root); // template <typename TVisitor> class GenTreeVisitor { protected: typedef Compiler::fgWalkResult fgWalkResult; enum { ComputeStack = false, DoPreOrder = false, DoPostOrder = false, DoLclVarsOnly = false, UseExecutionOrder = false, }; Compiler* m_compiler; ArrayStack<GenTree*> m_ancestors; GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack)) { assert(compiler != nullptr); static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder); static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder); } fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } public: fgWalkResult WalkTree(GenTree** use, GenTree* user) { assert(use != nullptr); GenTree* node = *use; if (TVisitor::ComputeStack) { m_ancestors.Push(node); } fgWalkResult result = fgWalkResult::WALK_CONTINUE; if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } node = *use; if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES)) { goto DONE; } } switch (node->OperGet()) { // Leaf lclVars case GT_LCL_VAR: case GT_LCL_FLD: case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Leaf nodes case GT_CATCH_ARG: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_MEMORYBARRIER: case GT_JMP: case GT_JCC: case GT_SETCC: case GT_NO_OP: case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: #if !defined(FEATURE_EH_FUNCLETS) case GT_END_LFIN: #endif // !FEATURE_EH_FUNCLETS case GT_PHI_ARG: case GT_JMPTABLE: case GT_CLS_VAR: case GT_CLS_VAR_ADDR: case GT_ARGPLACE: case GT_PHYSREG: case GT_EMITNOP: case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: break; // Lclvar unary operators case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Standard unary operators case GT_NOT: case GT_NEG: case GT_BSWAP: case GT_BSWAP16: case GT_COPY: case GT_RELOAD: case GT_ARR_LENGTH: case GT_CAST: case GT_BITCAST: case GT_CKFINITE: case GT_LCLHEAP: case GT_ADDR: case GT_IND: case GT_OBJ: case GT_BLK: case GT_BOX: case GT_ALLOCOBJ: case GT_INIT_VAL: case GT_JTRUE: case GT_SWITCH: case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: case GT_PUTARG_TYPE: case GT_RETURNTRAP: case GT_NOP: case GT_FIELD: case GT_RETURN: case GT_RETFILT: case GT_RUNTIMELOOKUP: case GT_KEEPALIVE: case GT_INC_SATURATE: { GenTreeUnOp* const unOp = 
node->AsUnOp(); if (unOp->gtOp1 != nullptr) { result = WalkTree(&unOp->gtOp1, unOp); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } // Special nodes case GT_PHI: for (GenTreePhi::Use& use : node->AsPhi()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_FIELD_LIST: for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_CMPXCHG: { GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg(); result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpValue, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_ARR_ELEM: { GenTreeArrElem* const arrElem = node->AsArrElem(); result = WalkTree(&arrElem->gtArrObj, arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } const unsigned rank = arrElem->gtArrRank; for (unsigned dim = 0; dim < rank; dim++) { result = WalkTree(&arrElem->gtArrInds[dim], arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } case GT_ARR_OFFSET: { GenTreeArrOffs* const arrOffs = node->AsArrOffs(); result = WalkTree(&arrOffs->gtOffset, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtIndex, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtArrObj, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_STORE_DYN_BLK: { GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk(); GenTree** op1Use = &dynBlock->gtOp1; GenTree** op2Use = &dynBlock->gtOp2; GenTree** op3Use = &dynBlock->gtDynamicSize; result = WalkTree(op1Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op2Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op3Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_CALL: { GenTreeCall* const call = node->AsCall(); if (call->gtCallThisArg != nullptr) { result = WalkTree(&call->gtCallThisArg->NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->Args()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->LateArgs()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtCallType == CT_INDIRECT) { if (call->gtCallCookie != nullptr) { result = WalkTree(&call->gtCallCookie, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } result = WalkTree(&call->gtCallAddr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtControlExpr != nullptr) { result = WalkTree(&call->gtControlExpr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { assert(node->AsMultiOp()->GetOperandCount() == 2); result = WalkTree(&node->AsMultiOp()->Op(2), node); if (result == 
fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&node->AsMultiOp()->Op(1), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } else { for (GenTree** use : node->AsMultiOp()->UseEdges()) { result = WalkTree(use, node); if (result == fgWalkResult::WALK_ABORT) { return result; } } } break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) // Binary nodes default: { assert(node->OperIsBinary()); GenTreeOp* const op = node->AsOp(); GenTree** op1Use = &op->gtOp1; GenTree** op2Use = &op->gtOp2; if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { std::swap(op1Use, op2Use); } if (*op1Use != nullptr) { result = WalkTree(op1Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (*op2Use != nullptr) { result = WalkTree(op2Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } } DONE: // Finally, visit the current node if (TVisitor::DoPostOrder) { result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user); } if (TVisitor::ComputeStack) { m_ancestors.Pop(); } return result; } }; template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder> class GenericTreeWalker final : public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>> { public: enum { ComputeStack = computeStack, DoPreOrder = doPreOrder, DoPostOrder = doPostOrder, DoLclVarsOnly = doLclVarsOnly, UseExecutionOrder = useExecutionOrder, }; private: Compiler::fgWalkData* m_walkData; public: GenericTreeWalker(Compiler::fgWalkData* walkData) : GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>( walkData->compiler) , m_walkData(walkData) { assert(walkData != nullptr); if (computeStack) { walkData->parentStack = &this->m_ancestors; } } Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtprVisitorFn(use, m_walkData); } Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtpoVisitorFn(use, m_walkData); } }; // A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor. template <typename TVisitor> class DomTreeVisitor { protected: Compiler* const m_compiler; DomTreeNode* const m_domTree; DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree) { } void Begin() { } void PreOrderVisit(BasicBlock* block) { } void PostOrderVisit(BasicBlock* block) { } void End() { } public: //------------------------------------------------------------------------ // WalkTree: Walk the dominator tree, starting from fgFirstBB. // // Notes: // This performs a non-recursive, non-allocating walk of the tree by using // DomTreeNode's firstChild and nextSibling links to locate the children of // a node and BasicBlock's bbIDom parent link to go back up the tree when // no more children are left. // // Forests are also supported, provided that all the roots are chained via // DomTreeNode::nextSibling to fgFirstBB. 
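    //
    // A minimal derived visitor (purely illustrative -- the class and variable names below are
    // hypothetical, not part of the JIT) might look something like the following:
    //
    //     class BlockCounter final : public DomTreeVisitor<BlockCounter>
    //     {
    //     public:
    //         unsigned m_count = 0;
    //
    //         BlockCounter(Compiler* compiler, DomTreeNode* domTree)
    //             : DomTreeVisitor<BlockCounter>(compiler, domTree)
    //         {
    //         }
    //
    //         void PreOrderVisit(BasicBlock* block)
    //         {
    //             // Called before any block dominated by "block" is visited.
    //             m_count++;
    //         }
    //     };
    //
    // and would be used like so, assuming "domTree" is a previously built dominator tree array:
    //
    //     BlockCounter counter(compiler, domTree);
    //     counter.WalkTree();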
// void WalkTree() { static_cast<TVisitor*>(this)->Begin(); for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next) { static_cast<TVisitor*>(this)->PreOrderVisit(block); next = m_domTree[block->bbNum].firstChild; if (next != nullptr) { assert(next->bbIDom == block); continue; } do { static_cast<TVisitor*>(this)->PostOrderVisit(block); next = m_domTree[block->bbNum].nextSibling; if (next != nullptr) { assert(next->bbIDom == block->bbIDom); break; } block = block->bbIDom; } while (block != nullptr); } static_cast<TVisitor*>(this)->End(); } }; // EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.: // for (EHblkDsc* const ehDsc : EHClauses(compiler)) // class EHClauses { EHblkDsc* m_begin; EHblkDsc* m_end; // Forward iterator for the exception handling table entries. Iteration is in table order. // class iterator { EHblkDsc* m_ehDsc; public: iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc) { } EHblkDsc* operator*() const { return m_ehDsc; } iterator& operator++() { ++m_ehDsc; return *this; } bool operator!=(const iterator& i) const { return m_ehDsc != i.m_ehDsc; } }; public: EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount) { assert((m_begin != nullptr) || (m_begin == m_end)); } iterator begin() const { return iterator(m_begin); } iterator end() const { return iterator(m_end); } }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Miscellaneous Compiler stuff XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Values used to mark the types a stack slot is used for const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer const unsigned TYPE_REF_STC = 0x40; // slot used as a struct const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type // const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES extern size_t grossVMsize; extern size_t grossNCsize; extern size_t totalNCsize; extern unsigned genMethodICnt; extern unsigned genMethodNCnt; extern size_t gcHeaderISize; extern size_t gcPtrMapISize; extern size_t gcHeaderNSize; extern size_t gcPtrMapNSize; #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of basic block counts (more data on 1 BB methods) */ #if COUNT_BASIC_BLOCKS extern Histogram bbCntTable; extern Histogram bbOneBBSizeTable; #endif /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
exit conditions * - number of loops that have an iterator (for like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent extern unsigned totalLoopCount; // counts the total number of natural loops extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like) extern unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < // const) extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like) extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops extern unsigned loopsThisMethod; // counts the number of loops in the current method extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. extern Histogram loopCountTable; // Histogram of loop counts extern Histogram loopExitCountTable; // Histogram of loop exit counts #endif // COUNT_LOOPS /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE extern size_t genFlowNodeSize; extern size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE #if MEASURE_NODE_SIZE struct NodeSizeStats { void Init() { genTreeNodeCnt = 0; genTreeNodeSize = 0; genTreeNodeActualSize = 0; } // Count of tree nodes allocated. unsigned __int64 genTreeNodeCnt; // The size we allocate. unsigned __int64 genTreeNodeSize; // The actual size of the node. Note that the actual size will likely be smaller // than the allocated size, but we sometimes use SetOper()/ChangeOper() to change // a smaller node to a larger one. TODO-Cleanup: add stats on // SetOper()/ChangeOper() usage to quantify this. unsigned __int64 genTreeNodeActualSize; }; extern NodeSizeStats genNodeSizeStats; // Total node size stats extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats extern Histogram genTreeNcntHist; extern Histogram genTreeNsizHist; #endif // MEASURE_NODE_SIZE /***************************************************************************** * Count fatal errors (including noway_asserts). 
*/ #if MEASURE_FATAL extern unsigned fatal_badCode; extern unsigned fatal_noWay; extern unsigned fatal_implLimitation; extern unsigned fatal_NOMEM; extern unsigned fatal_noWayAssertBody; #ifdef DEBUG extern unsigned fatal_noWayAssertBodyArgs; #endif // DEBUG extern unsigned fatal_NYI; #endif // MEASURE_FATAL /***************************************************************************** * Codegen */ #ifdef TARGET_XARCH const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar; const instruction INS_AND = INS_and; const instruction INS_OR = INS_or; const instruction INS_XOR = INS_xor; const instruction INS_NEG = INS_neg; const instruction INS_TEST = INS_test; const instruction INS_MUL = INS_imul; const instruction INS_SIGNED_DIVIDE = INS_idiv; const instruction INS_UNSIGNED_DIVIDE = INS_div; const instruction INS_BREAKPOINT = INS_int3; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbb; const instruction INS_NOT = INS_not; #endif // TARGET_XARCH #ifdef TARGET_ARM const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr; const instruction INS_AND = INS_and; const instruction INS_OR = INS_orr; const instruction INS_XOR = INS_eor; const instruction INS_NEG = INS_rsb; const instruction INS_TEST = INS_tst; const instruction INS_MUL = INS_mul; const instruction INS_MULADD = INS_mla; const instruction INS_SIGNED_DIVIDE = INS_sdiv; const instruction INS_UNSIGNED_DIVIDE = INS_udiv; const instruction INS_BREAKPOINT = INS_bkpt; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbc; const instruction INS_NOT = INS_mvn; const instruction INS_ABS = INS_vabs; const instruction INS_SQRT = INS_vsqrt; #endif // TARGET_ARM #ifdef TARGET_ARM64 const instruction INS_MULADD = INS_madd; inline const instruction INS_BREAKPOINT_osHelper() { // GDB needs the encoding of brk #0 // Windbg needs the encoding of brk #F000 return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows; } #define INS_BREAKPOINT INS_BREAKPOINT_osHelper() const instruction INS_ABS = INS_fabs; const instruction INS_SQRT = INS_fsqrt; #endif // TARGET_ARM64 /*****************************************************************************/ extern const BYTE genTypeSizes[]; extern const BYTE genTypeAlignments[]; extern const BYTE genTypeStSzs[]; extern const BYTE genActualTypes[]; /*****************************************************************************/ #ifdef DEBUG void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars); #endif // DEBUG #include "compiler.hpp" // All the shared inline functions /*****************************************************************************/ #endif //_COMPILER_H_ /*****************************************************************************/
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XX Represents the method data we are currently JIT-compiling. XX XX An instance of this class is created for every method we JIT. XX XX This contains all the info needed for the method. So allocating a XX XX a new instance per method makes it thread-safe. XX XX It should be used to do all the memory management for the compiler run. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _COMPILER_H_ #define _COMPILER_H_ /*****************************************************************************/ #include "jit.h" #include "opcode.h" #include "varset.h" #include "jitstd.h" #include "jithashtable.h" #include "gentree.h" #include "debuginfo.h" #include "lir.h" #include "block.h" #include "inline.h" #include "jiteh.h" #include "instr.h" #include "regalloc.h" #include "sm.h" #include "cycletimer.h" #include "blockset.h" #include "arraystack.h" #include "hashbv.h" #include "jitexpandarray.h" #include "tinyarray.h" #include "valuenum.h" #include "jittelemetry.h" #include "namedintrinsiclist.h" #ifdef LATE_DISASM #include "disasm.h" #endif #include "codegeninterface.h" #include "regset.h" #include "jitgcinfo.h" #if DUMP_GC_TABLES && defined(JIT32_GCENCODER) #include "gcdump.h" #endif #include "emit.h" #include "hwintrinsic.h" #include "simd.h" #include "simdashwintrinsic.h" // This is only used locally in the JIT to indicate that // a verification block should be inserted #define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER /***************************************************************************** * Forward declarations */ struct InfoHdr; // defined in GCInfo.h struct escapeMapping_t; // defined in fgdiagnostic.cpp class emitter; // defined in emit.h struct ShadowParamVarInfo; // defined in GSChecks.cpp struct InitVarDscInfo; // defined in register_arg_convention.h class FgStack; // defined in fgbasic.cpp class Instrumentor; // defined in fgprofile.cpp class SpanningTreeVisitor; // defined in fgprofile.cpp class CSE_DataFlow; // defined in OptCSE.cpp class OptBoolsDsc; // defined in optimizer.cpp #ifdef DEBUG struct IndentStack; #endif class Lowering; // defined in lower.h // The following are defined in this file, Compiler.h class Compiler; /***************************************************************************** * Unwind info */ #include "unwind.h" /*****************************************************************************/ // // Declare global operator new overloads that use the compiler's arena allocator // // I wanted to make the second argument optional, with default = CMK_Unknown, but that // caused these to be ambiguous with the global placement new operators. void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference); // Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions. 
#include "loopcloning.h" /*****************************************************************************/ /* This is included here and not earlier as it needs the definition of "CSE" * which is defined in the section above */ /*****************************************************************************/ unsigned genLog2(unsigned value); unsigned genLog2(unsigned __int64 value); unsigned ReinterpretHexAsDecimal(unsigned in); /*****************************************************************************/ const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC); #ifdef DEBUG const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs #endif //------------------------------------------------------------------------ // HFA info shared by LclVarDsc and fgArgTabEntry //------------------------------------------------------------------------ inline bool IsHfa(CorInfoHFAElemType kind) { return kind != CORINFO_HFA_ELEM_NONE; } inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind) { switch (kind) { case CORINFO_HFA_ELEM_FLOAT: return TYP_FLOAT; case CORINFO_HFA_ELEM_DOUBLE: return TYP_DOUBLE; #ifdef FEATURE_SIMD case CORINFO_HFA_ELEM_VECTOR64: return TYP_SIMD8; case CORINFO_HFA_ELEM_VECTOR128: return TYP_SIMD16; #endif case CORINFO_HFA_ELEM_NONE: return TYP_UNDEF; default: assert(!"Invalid HfaElemKind"); return TYP_UNDEF; } } inline CorInfoHFAElemType HfaElemKindFromType(var_types type) { switch (type) { case TYP_FLOAT: return CORINFO_HFA_ELEM_FLOAT; case TYP_DOUBLE: return CORINFO_HFA_ELEM_DOUBLE; #ifdef FEATURE_SIMD case TYP_SIMD8: return CORINFO_HFA_ELEM_VECTOR64; case TYP_SIMD16: return CORINFO_HFA_ELEM_VECTOR128; #endif case TYP_UNDEF: return CORINFO_HFA_ELEM_NONE; default: assert(!"Invalid HFA Type"); return CORINFO_HFA_ELEM_NONE; } } // The following holds the Local var info (scope information) typedef const char* VarName; // Actual ASCII string struct VarScopeDsc { unsigned vsdVarNum; // (remapped) LclVarDsc number unsigned vsdLVnum; // 'which' in eeGetLVinfo(). // Also, it is the index of this entry in the info.compVarScopes array, // which is useful since the array is also accessed via the // compEnterScopeList and compExitScopeList sorted arrays. IL_OFFSET vsdLifeBeg; // instr offset of beg of life IL_OFFSET vsdLifeEnd; // instr offset of end of life #ifdef DEBUG VarName vsdName; // name of the var #endif }; // This class stores information associated with a LclVar SSA definition. class LclSsaVarDsc { // The basic block where the definition occurs. Definitions of uninitialized variables // are considered to occur at the start of the first basic block (fgFirstBB). // // TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by // SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to // investigate and perhaps eliminate this rather unexpected behavior. BasicBlock* m_block; // The GT_ASG node that generates the definition, or nullptr for definitions // of uninitialized variables. 
GenTreeOp* m_asg; public: LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr) { } LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); } BasicBlock* GetBlock() const { return m_block; } void SetBlock(BasicBlock* block) { m_block = block; } GenTreeOp* GetAssignment() const { return m_asg; } void SetAssignment(GenTreeOp* asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); m_asg = asg; } ValueNumPair m_vnPair; }; // This class stores information associated with a memory SSA definition. class SsaMemDef { public: ValueNumPair m_vnPair; }; //------------------------------------------------------------------------ // SsaDefArray: A resizable array of SSA definitions. // // Unlike an ordinary resizable array implementation, this allows only element // addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM // (basically it's a 1-based array). The array doesn't impose any particular // requirements on the elements it stores and AllocSsaNum forwards its arguments // to the array element constructor, this way the array supports both LclSsaVarDsc // and SsaMemDef elements. // template <typename T> class SsaDefArray { T* m_array; unsigned m_arraySize; unsigned m_count; static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0); static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1); // Get the minimum valid SSA number. unsigned GetMinSsaNum() const { return SsaConfig::FIRST_SSA_NUM; } // Increase (double) the size of the array. void GrowArray(CompAllocator alloc) { unsigned oldSize = m_arraySize; unsigned newSize = max(2, oldSize * 2); T* newArray = alloc.allocate<T>(newSize); for (unsigned i = 0; i < oldSize; i++) { newArray[i] = m_array[i]; } m_array = newArray; m_arraySize = newSize; } public: // Construct an empty SsaDefArray. SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0) { } // Reset the array (used only if the SSA form is reconstructed). void Reset() { m_count = 0; } // Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM). template <class... Args> unsigned AllocSsaNum(CompAllocator alloc, Args&&... args) { if (m_count == m_arraySize) { GrowArray(alloc); } unsigned ssaNum = GetMinSsaNum() + m_count; m_array[m_count++] = T(std::forward<Args>(args)...); // Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1)); return ssaNum; } // Get the number of SSA definitions in the array. unsigned GetCount() const { return m_count; } // Get a pointer to the SSA definition at the specified index. T* GetSsaDefByIndex(unsigned index) { assert(index < m_count); return &m_array[index]; } // Check if the specified SSA number is valid. bool IsValidSsaNum(unsigned ssaNum) const { return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count)); } // Get a pointer to the SSA definition associated with the specified SSA number. T* GetSsaDef(unsigned ssaNum) { assert(ssaNum != SsaConfig::RESERVED_SSA_NUM); return GetSsaDefByIndex(ssaNum - GetMinSsaNum()); } // Get an SSA number associated with the specified SSA def (that must be in this array). 
    unsigned GetSsaNum(T* ssaDef)
    {
        assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count]));
        return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]);
    }
};

enum RefCountState
{
    RCS_INVALID, // not valid to get/set ref counts
    RCS_EARLY,   // early counts for struct promotion and struct passing
    RCS_NORMAL,  // normal ref counts (from lvaMarkRefs onward)
};

#ifdef DEBUG
// Reasons why we can't enregister a local.
enum class DoNotEnregisterReason
{
    None,
    AddrExposed,        // the address of this local is exposed.
    DontEnregStructs,   // struct enregistration is disabled.
    NotRegSizeStruct,   // the struct size does not match any register size, usually the struct size is too big.
    LocalField,         // the local is accessed with LCL_FLD, note we can do it not only for struct locals.
    VMNeedsStackAddr,
    LiveInOutOfHandler, // the local is alive in and out of an exception handler and is not single def.
    BlockOp,            // Is read or written via a block operation.
    IsStructArg,        // Is a struct passed as an argument in a way that requires a stack location.
    DepField,           // It is a field of a dependently promoted struct
    NoRegVars,          // opts.compFlags & CLFLG_REGVAR is not set
    MinOptsGC,          // It is a GC Ref and we are compiling MinOpts
#if !defined(TARGET_64BIT)
    LongParamField, // It is a decomposed field of a long parameter.
#endif
#ifdef JIT32_GCENCODER
    PinningRef,
#endif
    LclAddrNode,    // the local is accessed with LCL_ADDR_VAR/FLD.
    CastTakesAddr,
    StoreBlkSrc,    // the local is used as STORE_BLK source.
    OneAsgRetyping, // fgMorphOneAsgBlockOp prevents this local from being enregistered.
    SwizzleArg,     // the local is passed using LCL_FLD as another type.
    BlockOpRet,     // the struct is returned and it is promoted or there is a cast.
    ReturnSpCheck,  // the local is used to do an SP check
    SimdUserForcesDep // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted
};

enum class AddressExposedReason
{
    NONE,
    PARENT_EXPOSED,   // This is a promoted field but the parent is exposed.
    TOO_CONSERVATIVE, // Were marked as exposed to be conservative, fix these places.
    ESCAPE_ADDRESS,   // The address is escaping, for example, passed as call argument.
    WIDE_INDIR,       // We access via indirection with wider type.
    OSR_EXPOSED,      // It was exposed in the original method, osr has to repeat it.
    STRESS_LCL_FLD,   // Stress mode replaces localVar with localFld and makes them addrExposed.
    COPY_FLD_BY_FLD,  // Field by field copy takes the address of the local, can be fixed.
    DISPATCH_RET_BUF  // Caller return buffer dispatch.
};

#endif // DEBUG

class LclVarDsc
{
public:
    // The constructor. Most things can just be zero'ed.
    //
    // Initialize the ArgRegs to REG_STK.
    // Morph will update if this local is passed in a register.
    LclVarDsc()
        : _lvArgReg(REG_STK)
        ,
#if FEATURE_MULTIREG_ARGS
        _lvOtherArgReg(REG_STK)
        ,
#endif // FEATURE_MULTIREG_ARGS
        lvClassHnd(NO_CLASS_HANDLE)
        , lvRefBlks(BlockSetOps::UninitVal())
        , lvPerSsaData()
    {
    }

    // note this only packs because var_types is a typedef of unsigned char
    var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF

    unsigned char lvIsParam : 1;           // is this a parameter?
    unsigned char lvIsRegArg : 1;          // is this an argument that was passed by register?
    unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP)

    unsigned char lvOnFrame : 1;  // (part of) the variable lives on the frame
    unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the
                                  // variable is in the same register for the entire function.
    unsigned char lvTracked : 1; // is this a tracked variable?
    bool          lvTrackedNonStruct()
    {
        return lvTracked && lvType != TYP_STRUCT;
    }
    unsigned char lvPinned : 1; // is this a pinned variable?

    unsigned char lvMustInit : 1; // must be initialized

private:
    bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a
                            // global location, etc.
                            // We cannot reason reliably about the value of the variable.
public:
    unsigned char lvDoNotEnregister : 1; // Do not enregister this variable.
    unsigned char lvFieldAccessed : 1;   // The var is a struct local, and a field of the variable is accessed. Affects
                                         // struct promotion.
    unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must
                                          // be on the stack (at least at those boundaries.)

    unsigned char lvInSsa : 1;       // The variable is in SSA form (set by SsaBuilder)
    unsigned char lvIsCSE : 1;       // Indicates if this LclVar is a CSE variable.
    unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local.
    unsigned char lvStackByref : 1;  // This is a compiler temporary of TYP_BYREF that is known to point into our local
                                     // stack frame.

    unsigned char lvHasILStoreOp : 1;         // there is at least one STLOC or STARG on this local
    unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local

    unsigned char lvIsTemp : 1; // Short-lifetime compiler temp

#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
    unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref.
#endif                                   // defined(TARGET_AMD64) || defined(TARGET_ARM64)

    unsigned char lvIsBoolean : 1; // set if variable is boolean
    unsigned char lvSingleDef : 1; // variable has a single def
                                   // before lvaMarkLocalVars: identifies ref type locals that can get type updates
                                   // after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies

    unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate
                                               // Currently, this is only used to decide if an EH variable can be
                                               // a register candidate or not.

    unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variables that are disqualified from register
                                                         // candidacy

    unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan)
                                          // and is spilled making it a candidate to spill right after the
                                          // first (and only) definition.
                                          // Note: We cannot reuse lvSingleDefRegCandidate because it is set
                                          // in an earlier phase and the information might not be appropriate
                                          // in LSRA.

    unsigned char lvDisqualify : 1;   // variable is no longer OK for add copy optimization
    unsigned char lvVolatileHint : 1; // hint for AssertionProp

#ifndef TARGET_64BIT
    unsigned char lvStructDoubleAlign : 1; // Must we double align this struct?
#endif                                     // !TARGET_64BIT
#ifdef TARGET_64BIT
    unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long
#endif
#ifdef DEBUG
    unsigned char lvKeepType : 1;       // Don't change the type of this variable
    unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one
#endif
    unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security
                               // checks)

    unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks?

    unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a
                                  // 32-bit target.
For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether // references to the arg are being rewritten as references to a promoted shadow local. unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local? unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout" unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif // FEATURE_HFA_FIELDS_PRESENT #ifdef DEBUG // TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct // types, and is needed because of cases where TYP_STRUCT is bashed to an integral type. // Consider cleaning this up so this workaround is not required. unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals. // I.e. there is no longer any reference to the struct directly. // In this case we can simply remove this struct local. unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no // reference to the fields of this struct. #endif unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes #ifdef FEATURE_SIMD // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the // type of an arg node is TYP_BYREF and a local node is TYP_SIMD*. unsigned char lvSIMDType : 1; // This is a SIMD struct unsigned char lvUsedInSIMDIntrinsic : 1; // This tells lclvar is used for simd intrinsic unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has less than 32 entries CorInfoType GetSimdBaseJitType() const { return (CorInfoType)lvSimdBaseJitType; } void SetSimdBaseJitType(CorInfoType simdBaseJitType) { assert(simdBaseJitType < (1 << 5)); lvSimdBaseJitType = (unsigned char)simdBaseJitType; } var_types GetSimdBaseType() const; #endif // FEATURE_SIMD unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct. unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type #ifdef DEBUG unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness #endif unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc, // eh) unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in // the prolog. If the local has gc pointers, there are no gc-safe points // between the prolog and the explicit initialization. union { unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct // local. For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the // struct local created to model the parameter's struct promotion, if any. 
unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local). // Valid on promoted struct local fields. }; unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc. unsigned char lvFldOffset; unsigned char lvFldOrdinal; #ifdef DEBUG unsigned char lvSingleDefDisqualifyReason = 'H'; #endif #if FEATURE_MULTIREG_ARGS regNumber lvRegNumForSlot(unsigned slotNum) { if (slotNum == 0) { return (regNumber)_lvArgReg; } else if (slotNum == 1) { return GetOtherArgReg(); } else { assert(false && "Invalid slotNum!"); } unreached(); } #endif // FEATURE_MULTIREG_ARGS CorInfoHFAElemType GetLvHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _lvHfaElemKind; #else NOWAY_MSG("GetLvHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif // FEATURE_HFA_FIELDS_PRESENT } void SetLvHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _lvHfaElemKind = elemKind; #else NOWAY_MSG("SetLvHfaElemKind"); #endif // FEATURE_HFA_FIELDS_PRESENT } bool lvIsHfa() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetLvHfaElemKind()); } else { return false; } } bool lvIsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return lvIsRegArg && lvIsHfa(); } else { return false; } } //------------------------------------------------------------------------------ // lvHfaSlots: Get the number of slots used by an HFA local // // Return Value: // On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA // On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8 // unsigned lvHfaSlots() const { assert(lvIsHfa()); assert(varTypeIsStruct(lvType)); unsigned slots = 0; #ifdef TARGET_ARM slots = lvExactSize / sizeof(float); assert(slots <= 8); #elif defined(TARGET_ARM64) switch (GetLvHfaElemKind()) { case CORINFO_HFA_ELEM_NONE: assert(!"lvHfaSlots called for non-HFA"); break; case CORINFO_HFA_ELEM_FLOAT: assert((lvExactSize % 4) == 0); slots = lvExactSize >> 2; break; case CORINFO_HFA_ELEM_DOUBLE: case CORINFO_HFA_ELEM_VECTOR64: assert((lvExactSize % 8) == 0); slots = lvExactSize >> 3; break; case CORINFO_HFA_ELEM_VECTOR128: assert((lvExactSize % 16) == 0); slots = lvExactSize >> 4; break; default: unreached(); } assert(slots <= 4); #endif // TARGET_ARM64 return slots; } // lvIsMultiRegArgOrRet() // returns true if this is a multireg LclVar struct used in an argument context // or if this is a multireg LclVar struct assigned from a multireg call bool lvIsMultiRegArgOrRet() { return lvIsMultiRegArg || lvIsMultiRegRet; } #if defined(DEBUG) private: DoNotEnregisterReason m_doNotEnregReason; AddressExposedReason m_addrExposedReason; public: void SetDoNotEnregReason(DoNotEnregisterReason reason) { m_doNotEnregReason = reason; } DoNotEnregisterReason GetDoNotEnregReason() const { return m_doNotEnregReason; } AddressExposedReason GetAddrExposedReason() const { return m_addrExposedReason; } #endif // DEBUG public: void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason)) { m_addrExposed = value; INDEBUG(m_addrExposedReason = reason); } void CleanAddressExposed() { m_addrExposed = false; } bool IsAddressExposed() const { return m_addrExposed; } private: regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a // register pair). It is set during codegen any time the // variable is enregistered (lvRegister is only set // to non-zero if the variable gets the same register assignment for its entire // lifetime). 
#if !defined(TARGET_64BIT) regNumberSmall _lvOtherReg; // Used for "upper half" of long var. #endif // !defined(TARGET_64BIT) regNumberSmall _lvArgReg; // The (first) register in which this argument is passed. #if FEATURE_MULTIREG_ARGS regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register. // Note this is defined but not used by ARM32 #endif // FEATURE_MULTIREG_ARGS regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry public: // The register number is stored in a small format (8 bits), but the getters return and the setters take // a full-size (unsigned) format, to localize the casts here. ///////////////////// regNumber GetRegNum() const { return (regNumber)_lvRegNum; } void SetRegNum(regNumber reg) { _lvRegNum = (regNumberSmall)reg; assert(_lvRegNum == reg); } ///////////////////// #if defined(TARGET_64BIT) regNumber GetOtherReg() const { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings return REG_NA; } void SetOtherReg(regNumber reg) { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings } #else // !TARGET_64BIT regNumber GetOtherReg() const { return (regNumber)_lvOtherReg; } void SetOtherReg(regNumber reg) { _lvOtherReg = (regNumberSmall)reg; assert(_lvOtherReg == reg); } #endif // !TARGET_64BIT ///////////////////// regNumber GetArgReg() const { return (regNumber)_lvArgReg; } void SetArgReg(regNumber reg) { _lvArgReg = (regNumberSmall)reg; assert(_lvArgReg == reg); } #if FEATURE_MULTIREG_ARGS regNumber GetOtherArgReg() const { return (regNumber)_lvOtherArgReg; } void SetOtherArgReg(regNumber reg) { _lvOtherArgReg = (regNumberSmall)reg; assert(_lvOtherArgReg == reg); } #endif // FEATURE_MULTIREG_ARGS #ifdef FEATURE_SIMD // Is this is a SIMD struct? bool lvIsSIMDType() const { return lvSIMDType; } // Is this is a SIMD struct which is used for SIMD intrinsic? bool lvIsUsedInSIMDIntrinsic() const { return lvUsedInSIMDIntrinsic; } #else // If feature_simd not enabled, return false bool lvIsSIMDType() const { return false; } bool lvIsUsedInSIMDIntrinsic() const { return false; } #endif ///////////////////// regNumber GetArgInitReg() const { return (regNumber)_lvArgInitReg; } void SetArgInitReg(regNumber reg) { _lvArgInitReg = (regNumberSmall)reg; assert(_lvArgInitReg == reg); } ///////////////////// bool lvIsRegCandidate() const { return lvLRACandidate != 0; } bool lvIsInReg() const { return lvIsRegCandidate() && (GetRegNum() != REG_STK); } regMaskTP lvRegMask() const { regMaskTP regMask = RBM_NONE; if (varTypeUsesFloatReg(TypeGet())) { if (GetRegNum() != REG_STK) { regMask = genRegMaskFloat(GetRegNum(), TypeGet()); } } else { if (GetRegNum() != REG_STK) { regMask = genRegMask(GetRegNum()); } } return regMask; } unsigned short lvVarIndex; // variable tracking index private: unsigned short m_lvRefCnt; // unweighted (real) reference count. For implicit by reference // parameters, this gets hijacked from fgResetImplicitByRefRefCount // through fgMarkDemotedImplicitByRefArgs, to provide a static // appearance count (computed during address-exposed analysis) // that fgMakeOutgoingStructArgCopy consults during global morph // to determine if eliding its copy is legal. 
weight_t m_lvRefCntWtd; // weighted reference count public: unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const; void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL); void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL); weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const; void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL); void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL); private: int lvStkOffs; // stack offset of home in bytes. public: int GetStackOffset() const { return lvStkOffs; } void SetStackOffset(int offset) { lvStkOffs = offset; } unsigned lvExactSize; // (exact) size of the type in bytes // Is this a promoted struct? // This method returns true only for structs (including SIMD structs), not for // locals that are split on a 32-bit target. // It is only necessary to use this: // 1) if only structs are wanted, and // 2) if Lowering has already been done. // Otherwise lvPromoted is valid. bool lvPromotedStruct() { #if !defined(TARGET_64BIT) return (lvPromoted && !varTypeIsLong(lvType)); #else // defined(TARGET_64BIT) return lvPromoted; #endif // defined(TARGET_64BIT) } unsigned lvSize() const; size_t lvArgStackSize() const; unsigned lvSlotNum; // original slot # (if remapped) typeInfo lvVerTypeInfo; // type info needed for verification // class handle for the local or null if not known or not a class, // for a struct handle use `GetStructHnd()`. CORINFO_CLASS_HANDLE lvClassHnd; // Get class handle for a struct local or implicitByRef struct local. CORINFO_CLASS_HANDLE GetStructHnd() const { #ifdef FEATURE_SIMD if (lvSIMDType && (m_layout == nullptr)) { return NO_CLASS_HANDLE; } #endif assert(m_layout != nullptr); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF))); #else assert(varTypeIsStruct(TypeGet())); #endif CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle(); assert(structHnd != NO_CLASS_HANDLE); return structHnd; } CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields private: ClassLayout* m_layout; // layout info for structs public: BlockSet lvRefBlks; // Set of blocks that contain refs Statement* lvDefStmt; // Pointer to the statement with the single definition void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies var_types TypeGet() const { return (var_types)lvType; } bool lvStackAligned() const { assert(lvIsStructField); return ((lvFldOffset % TARGET_POINTER_SIZE) == 0); } bool lvNormalizeOnLoad() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. (lvIsParam || m_addrExposed || lvIsStructField); } bool lvNormalizeOnStore() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. !(lvIsParam || m_addrExposed || lvIsStructField); } void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true); var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { assert(lvIsHfa()); return HfaTypeFromElemKind(GetLvHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type) { if (GlobalJitOptions::compFeatureHfa) { CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetLvHfaElemKind(elemKind); // Ensure we've allocated enough bits. 
assert(GetLvHfaElemKind() == elemKind); } } // Returns true if this variable contains GC pointers (including being a GC pointer itself). bool HasGCPtr() const { return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr()); } // Returns the layout of a struct variable. ClassLayout* GetLayout() const { assert(varTypeIsStruct(lvType)); return m_layout; } // Sets the layout of a struct variable. void SetLayout(ClassLayout* layout) { assert(varTypeIsStruct(lvType)); assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout)); m_layout = layout; } SsaDefArray<LclSsaVarDsc> lvPerSsaData; // Returns the address of the per-Ssa data for the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). LclSsaVarDsc* GetPerSsaData(unsigned ssaNum) { return lvPerSsaData.GetSsaDef(ssaNum); } // Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition // of this variable. unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef) { return lvPerSsaData.GetSsaNum(ssaDef); } var_types GetRegisterType(const GenTreeLclVarCommon* tree) const; var_types GetRegisterType() const; var_types GetActualRegisterType() const; bool IsEnregisterableType() const { return GetRegisterType() != TYP_UNDEF; } bool IsEnregisterableLcl() const { if (lvDoNotEnregister) { return false; } return IsEnregisterableType(); } //----------------------------------------------------------------------------- // IsAlwaysAliveInMemory: Determines if this variable's value is always // up-to-date on stack. This is possible if this is an EH-var or // we decided to spill after single-def. // bool IsAlwaysAliveInMemory() const { return lvLiveInOutOfHndlr || lvSpillAtSingleDef; } bool CanBeReplacedWithItsField(Compiler* comp) const; #ifdef DEBUG public: const char* lvReason; void PrintVarReg() const { printf("%s", getRegName(GetRegNum())); } #endif // DEBUG }; // class LclVarDsc enum class SymbolicIntegerValue : int32_t { LongMin, IntMin, ShortMin, ByteMin, Zero, One, ByteMax, UByteMax, ShortMax, UShortMax, IntMax, UIntMax, LongMax, }; inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) > static_cast<int32_t>(right); } inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) >= static_cast<int32_t>(right); } inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) < static_cast<int32_t>(right); } inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) <= static_cast<int32_t>(right); } // Represents an integral range useful for reasoning about integral casts. // It uses a symbolic representation for lower and upper bounds so // that it can efficiently handle integers of all sizes on all hosts. // // Note that the ranges represented by this class are **always** in the // "signed" domain. This is so that if we know the range a node produces, it // can be trivially used to determine if a cast above the node does or does not // overflow, which requires that the interpretation of integers be the same both // for the "input" and "output". We choose signed interpretation here because it // produces nice continuous ranges and because IR uses sign-extension for constants. // // Some examples of how ranges are computed for casts: // 1. 
CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the // same range - all casts that do not change the representation, i. e. have the same // "actual" input and output type, have the same "input" and "output" range. // 2. CAST_OVF(ulong <- uint): never oveflows => the "input" range is [INT_MIN..INT_MAX] // (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32 // bit integers zero-extended to 64 bits). // 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0 // when interpreting as signed => the "input" range is [0..INT_MAX], the same range // being the produced one as the node does not change the width of the integer. // class IntegralRange { private: SymbolicIntegerValue m_lowerBound; SymbolicIntegerValue m_upperBound; public: IntegralRange() = default; IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound) : m_lowerBound(lowerBound), m_upperBound(upperBound) { assert(lowerBound <= upperBound); } bool Contains(int64_t value) const; bool Contains(IntegralRange other) const { return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound); } bool IsPositive() { return m_lowerBound >= SymbolicIntegerValue::Zero; } bool Equals(IntegralRange other) const { return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound); } static int64_t SymbolicToRealValue(SymbolicIntegerValue value); static SymbolicIntegerValue LowerBoundForType(var_types type); static SymbolicIntegerValue UpperBoundForType(var_types type); static IntegralRange ForType(var_types type) { return {LowerBoundForType(type), UpperBoundForType(type)}; } static IntegralRange ForNode(GenTree* node, Compiler* compiler); static IntegralRange ForCastInput(GenTreeCast* cast); static IntegralRange ForCastOutput(GenTreeCast* cast); #ifdef DEBUG static void Print(IntegralRange range); #endif // DEBUG }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX TempsInfo XX XX XX XX The temporary lclVars allocated by the compiler for code generation XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /***************************************************************************** * * The following keeps track of temporaries allocated in the stack frame * during code-generation (after register allocation). These spill-temps are * only used if we run out of registers while evaluating a tree. * * These are different from the more common temps allocated by lvaGrabTemp(). 
*/ class TempDsc { public: TempDsc* tdNext; private: int tdOffs; #ifdef DEBUG static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG #endif // DEBUG int tdNum; BYTE tdSize; var_types tdType; public: TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType) { #ifdef DEBUG // temps must have a negative number (so they have a different number from all local variables) assert(tdNum < 0); tdOffs = BAD_TEMP_OFFSET; #endif // DEBUG if (tdNum != _tdNum) { IMPL_LIMITATION("too many spill temps"); } } #ifdef DEBUG bool tdLegalOffset() const { return tdOffs != BAD_TEMP_OFFSET; } #endif // DEBUG int tdTempOffs() const { assert(tdLegalOffset()); return tdOffs; } void tdSetTempOffs(int offs) { tdOffs = offs; assert(tdLegalOffset()); } void tdAdjustTempOffs(int offs) { tdOffs += offs; assert(tdLegalOffset()); } int tdTempNum() const { assert(tdNum < 0); return tdNum; } unsigned tdTempSize() const { return tdSize; } var_types tdTempType() const { return tdType; } }; // interface to hide linearscan implementation from rest of compiler class LinearScanInterface { public: virtual void doLinearScan() = 0; virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0; virtual bool willEnregisterLocalVars() const = 0; #if TRACK_LSRA_STATS virtual void dumpLsraStatsCsv(FILE* file) = 0; virtual void dumpLsraStatsSummary(FILE* file) = 0; #endif // TRACK_LSRA_STATS }; LinearScanInterface* getLinearScanAllocator(Compiler* comp); // Information about arrays: their element type and size, and the offset of the first element. // We label GT_IND's that are array indices with GTF_IND_ARR_INDEX, and, for such nodes, // associate an array info via the map retrieved by GetArrayInfoMap(). This information is used, // for example, in value numbering of array index expressions. struct ArrayInfo { var_types m_elemType; CORINFO_CLASS_HANDLE m_elemStructType; unsigned m_elemSize; unsigned m_elemOffset; ArrayInfo() : m_elemType(TYP_UNDEF), m_elemStructType(nullptr), m_elemSize(0), m_elemOffset(0) { } ArrayInfo(var_types elemType, unsigned elemSize, unsigned elemOffset, CORINFO_CLASS_HANDLE elemStructType) : m_elemType(elemType), m_elemStructType(elemStructType), m_elemSize(elemSize), m_elemOffset(elemOffset) { } }; // This enumeration names the phases into which we divide compilation. The phases should completely // partition a compilation. enum Phases { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm, #include "compphases.h" PHASE_NUMBER_OF }; extern const char* PhaseNames[]; extern const char* PhaseEnums[]; extern const LPCWSTR PhaseShortNames[]; // Specify which checks should be run after each phase // enum class PhaseChecks { CHECK_NONE, CHECK_ALL }; // Specify compiler data that a phase might modify enum class PhaseStatus : unsigned { MODIFIED_NOTHING, MODIFIED_EVERYTHING }; // The following enum provides a simple 1:1 mapping to CLR API's enum API_ICorJitInfo_Names { #define DEF_CLR_API(name) API_##name, #include "ICorJitInfo_API_names.h" API_COUNT }; //--------------------------------------------------------------- // Compilation time. // // A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods. // We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles // of the compilation, as well as the cycles for each phase. We also track the number of bytecodes. 
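// For example, once a compilation finishes, the average cost of a phase can be computed from the
// per-phase arrays below as m_cyclesByPhase[phase] / m_invokesByPhase[phase], and compared against
// m_totalCycles for the whole compilation.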
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated // by "m_timerFailure" being true. // If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile. struct CompTimeInfo { #ifdef FEATURE_JIT_METHOD_PERF // The string names of the phases. static const char* PhaseNames[]; static bool PhaseHasChildren[]; static int PhaseParent[]; static bool PhaseReportsIRSize[]; unsigned m_byteCodeBytes; unsigned __int64 m_totalCycles; unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF]; #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF]; unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF]; #endif unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF]; // For better documentation, we call EndPhase on // non-leaf phases. We should also call EndPhase on the // last leaf subphase; obviously, the elapsed cycles between the EndPhase // for the last leaf subphase and the EndPhase for an ancestor should be very small. // We add all such "redundant end phase" intervals to this variable below; we print // it out in a report, so we can verify that it is, indeed, very small. If it ever // isn't, this means that we're doing something significant between the end of the last // declared subphase and the end of its parent. unsigned __int64 m_parentPhaseEndSlop; bool m_timerFailure; #if MEASURE_CLRAPI_CALLS // The following measures the time spent inside each individual CLR API call. unsigned m_allClrAPIcalls; unsigned m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT]; unsigned __int64 m_allClrAPIcycles; unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT]; #endif // MEASURE_CLRAPI_CALLS CompTimeInfo(unsigned byteCodeBytes); #endif }; #ifdef FEATURE_JIT_METHOD_PERF #if MEASURE_CLRAPI_CALLS struct WrapICorJitInfo; #endif // This class summarizes the JIT time information over the course of a run: the number of methods compiled, // and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above). // The operation of adding a single method's timing to the summary may be performed concurrently by several // threads, so it is protected by a lock. // This class is intended to be used as a singleton type, with only a single instance. class CompTimeSummaryInfo { // This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one). static CritSecObject s_compTimeSummaryLock; int m_numMethods; int m_totMethods; CompTimeInfo m_total; CompTimeInfo m_maximum; int m_numFilteredMethods; CompTimeInfo m_filtered; // This can use what ever data you want to determine if the value to be added // belongs in the filtered section (it's always included in the unfiltered section) bool IncludedInFilteredData(CompTimeInfo& info); public: // This is the unique CompTimeSummaryInfo object for this instance of the runtime. static CompTimeSummaryInfo s_compTimeSummary; CompTimeSummaryInfo() : m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0) { } // Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary. // This is thread safe. void AddInfo(CompTimeInfo& info, bool includePhases); // Print the summary information to "f". // This is not thread-safe; assumed to be called by only one thread. 
void Print(FILE* f); }; // A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation, // and when the current phase started. This is intended to be part of a Compilation object. // class JitTimer { unsigned __int64 m_start; // Start of the compilation. unsigned __int64 m_curPhaseStart; // Start of the current phase. #if MEASURE_CLRAPI_CALLS unsigned __int64 m_CLRcallStart; // Start of the current CLR API call (if any). unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far unsigned __int64 m_CLRcallCycles; // CLR API cycles under current outer so far. int m_CLRcallAPInum; // The enum/index of the current CLR API call (or -1). static double s_cyclesPerSec; // Cached for speedier measurements #endif #ifdef DEBUG Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start). #endif CompTimeInfo m_info; // The CompTimeInfo for this compilation. static CritSecObject s_csvLock; // Lock to protect the time log file. static FILE* s_csvFile; // The time log file handle. void PrintCsvMethodStats(Compiler* comp); private: void* operator new(size_t); void* operator new[](size_t); void operator delete(void*); void operator delete[](void*); public: // Initialized the timer instance JitTimer(unsigned byteCodeSize); static JitTimer* Create(Compiler* comp, unsigned byteCodeSize) { return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize); } static void PrintCsvHeader(); // Ends the current phase (argument is for a redundant check). void EndPhase(Compiler* compiler, Phases phase); #if MEASURE_CLRAPI_CALLS // Start and end a timed CLR API call. void CLRApiCallEnter(unsigned apix); void CLRApiCallLeave(unsigned apix); #endif // MEASURE_CLRAPI_CALLS // Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode, // and adds it to "sum". void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases); // Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets // *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of // "m_info" to true. bool GetThreadCycles(unsigned __int64* cycles) { bool res = CycleTimer::GetThreadCyclesS(cycles); if (!res) { m_info.m_timerFailure = true; } return res; } static void Shutdown(); }; #endif // FEATURE_JIT_METHOD_PERF //------------------- Function/Funclet info ------------------------------- enum FuncKind : BYTE { FUNC_ROOT, // The main/root function (always id==0) FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler) FUNC_FILTER, // a funclet associated with an EH filter FUNC_COUNT }; class emitLocation; struct FuncInfoDsc { FuncKind funKind; BYTE funFlags; // Currently unused, just here for padding unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this // funclet. It is only valid if funKind field indicates this is a // EH-related funclet: FUNC_HANDLER or FUNC_FILTER #if defined(TARGET_AMD64) // TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array. emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; UNWIND_INFO unwindHeader; // Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd // number of codes, the VM or Zapper will 4-byte align the whole thing. 
    BYTE     unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))];
    unsigned unwindCodeSlot;

#elif defined(TARGET_X86)

    emitLocation* startLoc;
    emitLocation* endLoc;
    emitLocation* coldStartLoc; // locations for the cold section, if there is one.
    emitLocation* coldEndLoc;

#elif defined(TARGET_ARMARCH)

    UnwindInfo  uwi;     // Unwind information for this function/funclet's hot section
    UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section
                         // Note: we only have a pointer here instead of the actual object,
                         // to save memory in the JIT case (compared to the NGEN case),
                         // where we don't have any cold section.
                         // Note 2: we currently don't support hot/cold splitting in functions
                         // with EH, so uwiCold will be NULL for all funclets.

    emitLocation* startLoc;
    emitLocation* endLoc;
    emitLocation* coldStartLoc; // locations for the cold section, if there is one.
    emitLocation* coldEndLoc;

#endif // TARGET_ARMARCH

#if defined(FEATURE_CFI_SUPPORT)
    jitstd::vector<CFI_CODE>* cfiCodes;
#endif // FEATURE_CFI_SUPPORT

    // Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else
    // that isn't shared between the main function body and funclets.
};

struct fgArgTabEntry
{
    GenTreeCall::Use* use;     // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg.
    GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any.

    // Get the node that corresponds to this argument entry.
    // This is the "real" node and not a placeholder or setup node.
    GenTree* GetNode() const
    {
        return lateUse == nullptr ? use->GetNode() : lateUse->GetNode();
    }

    unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL

private:
    regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for
                                               // arguments passed on the stack

public:
    unsigned numRegs; // Count of number of registers that this argument uses.
                      // Note that on ARM, if we have a double hfa, this reflects the number
                      // of DOUBLE registers.

#if defined(UNIX_AMD64_ABI)
    // Unix amd64 will split floating point types and integer types in structs
    // between floating point and general purpose registers. Keep track of that
    // information so we do not need to recompute it later.
    unsigned structIntRegs;
    unsigned structFloatRegs;
#endif // UNIX_AMD64_ABI

#if defined(DEBUG_ARG_SLOTS)
    // These fields were used to calculate stack size in stack slots for arguments
    // but now they are replaced by precise `m_byteOffset/m_byteSize` because of
    // arm64 apple abi requirements.

    // A slot is a pointer sized region in the OutArg area.
    unsigned slotNum;  // When an argument is passed in the OutArg area this is the slot number in the OutArg area
    unsigned numSlots; // Count of number of slots that this argument uses
#endif // DEBUG_ARG_SLOTS

    // Return number of stack slots that this argument is taking.
    // TODO-Cleanup: this function does not align with the arm64 Apple model;
    // delete it. In most cases we just want to know whether the argument uses the stack or not,
    // but in some cases we are checking if it is a multireg arg, like:
    // `numRegs + GetStackSlotsNumber() > 1` that is harder to replace.
    //
    unsigned GetStackSlotsNumber() const
    {
        return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
    }

private:
    unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg.
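    // Note: an argument becomes a "late" arg when its value (or the temp holding it) is moved to the
    // gtCallLateArgs list, typically by fgArgInfo::EvalArgsToTemps; _lateArgInx then records its
    // position in that list (see isLateArg() below) and remains UINT_MAX otherwise.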
public:
    unsigned  tmpNum;  // the LclVar number if we had to force evaluation of this arg
    var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a
                       // struct is passed as a scalar type, this is that type.
                       // Note that if a struct is passed by reference, this will still be the struct type.

    bool needTmp : 1;      // True when we force this argument's evaluation into a temp LclVar
    bool needPlace : 1;    // True when we must replace this argument with a placeholder node
    bool isTmp : 1;        // True when we set up a temp LclVar for this argument due to size issues with the struct
    bool processed : 1;    // True when we have decided the evaluation order for this argument in the gtCallLateArgs
    bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of
                           // previous arguments.

    NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced
                                               // to be in certain registers or on the stack, regardless of where they
                                               // appear in the arg list.

    bool isStruct : 1;    // True if this is a struct arg
    bool _isVararg : 1;   // True if the argument is in a vararg context.
    bool passedByRef : 1; // True iff the argument is passed by reference.
#if FEATURE_ARG_SPLIT
    bool _isSplit : 1; // True when this argument is split between the registers and OutArg area
#endif                 // FEATURE_ARG_SPLIT

#ifdef FEATURE_HFA_FIELDS_PRESENT
    CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
#endif

    CorInfoHFAElemType GetHfaElemKind() const
    {
#ifdef FEATURE_HFA_FIELDS_PRESENT
        return _hfaElemKind;
#else
        NOWAY_MSG("GetHfaElemKind");
        return CORINFO_HFA_ELEM_NONE;
#endif
    }

    void SetHfaElemKind(CorInfoHFAElemType elemKind)
    {
#ifdef FEATURE_HFA_FIELDS_PRESENT
        _hfaElemKind = elemKind;
#else
        NOWAY_MSG("SetHfaElemKind");
#endif
    }

    bool isNonStandard() const
    {
        return nonStandardArgKind != NonStandardArgKind::None;
    }

    // Returns true if the IR node for this non-standard arg is added by fgInitArgInfo.
    // In this case, it must be removed by GenTreeCall::ResetArgInfo.
bool isNonStandardArgAddedLate() const { switch (static_cast<NonStandardArgKind>(nonStandardArgKind)) { case NonStandardArgKind::None: case NonStandardArgKind::PInvokeFrame: case NonStandardArgKind::ShiftLow: case NonStandardArgKind::ShiftHigh: case NonStandardArgKind::FixedRetBuffer: case NonStandardArgKind::ValidateIndirectCallTarget: return false; case NonStandardArgKind::WrapperDelegateCell: case NonStandardArgKind::VirtualStubCell: case NonStandardArgKind::PInvokeCookie: case NonStandardArgKind::PInvokeTarget: case NonStandardArgKind::R2RIndirectionCell: return true; default: unreached(); } } bool isLateArg() const { bool isLate = (_lateArgInx != UINT_MAX); return isLate; } unsigned GetLateArgInx() const { assert(isLateArg()); return _lateArgInx; } void SetLateArgInx(unsigned inx) { _lateArgInx = inx; } regNumber GetRegNum() const { return (regNumber)regNums[0]; } regNumber GetOtherRegNum() const { return (regNumber)regNums[1]; } #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif void setRegNum(unsigned int i, regNumber regNum) { assert(i < MAX_ARG_REG_COUNT); regNums[i] = (regNumberSmall)regNum; } regNumber GetRegNum(unsigned int i) { assert(i < MAX_ARG_REG_COUNT); return (regNumber)regNums[i]; } bool IsSplit() const { #if FEATURE_ARG_SPLIT return compFeatureArgSplit() && _isSplit; #else // FEATURE_ARG_SPLIT return false; #endif } void SetSplit(bool value) { #if FEATURE_ARG_SPLIT _isSplit = value; #endif } bool IsVararg() const { return compFeatureVarArg() && _isVararg; } void SetIsVararg(bool value) { if (compFeatureVarArg()) { _isVararg = value; } } bool IsHfaArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()); } else { return false; } } bool IsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()) && isPassedInRegisters(); } else { return false; } } unsigned intRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structIntRegs; } #endif // defined(UNIX_AMD64_ABI) if (!this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } unsigned floatRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structFloatRegs; } #endif // defined(UNIX_AMD64_ABI) if (this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } // Get the number of bytes that this argument is occupying on the stack, // including padding up to the target pointer size for platforms // where a stack argument can't take less. unsigned GetStackByteSize() const { if (!IsSplit() && numRegs > 0) { return 0; } assert(!IsHfaArg() || !IsSplit()); assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs); const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs; return stackByteSize; } var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { return HfaTypeFromElemKind(GetHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type, unsigned hfaSlots) { if (GlobalJitOptions::compFeatureHfa) { if (type != TYP_UNDEF) { // We must already have set the passing mode. assert(numRegs != 0 || GetStackByteSize() != 0); // We originally set numRegs according to the size of the struct, but if the size of the // hfaType is not the same as the pointer size, we need to correct it. // Note that hfaSlots is the number of registers we will use. For ARM, that is twice // the number of "double registers". 
unsigned numHfaRegs = hfaSlots; #ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Must be an even number of registers. assert((numRegs & 1) == 0); numHfaRegs = hfaSlots / 2; } #endif // TARGET_ARM if (!IsHfaArg()) { // We haven't previously set this; do so now. CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetHfaElemKind(elemKind); // Ensure we've allocated enough bits. assert(GetHfaElemKind() == elemKind); if (isPassedInRegisters()) { numRegs = numHfaRegs; } } else { // We've already set this; ensure that it's consistent. if (isPassedInRegisters()) { assert(numRegs == numHfaRegs); } assert(type == HfaTypeFromElemKind(GetHfaElemKind())); } } } } #ifdef TARGET_ARM void SetIsBackFilled(bool backFilled) { isBackFilled = backFilled; } bool IsBackFilled() const { return isBackFilled; } #else // !TARGET_ARM void SetIsBackFilled(bool backFilled) { } bool IsBackFilled() const { return false; } #endif // !TARGET_ARM bool isPassedInRegisters() const { return !IsSplit() && (numRegs != 0); } bool isPassedInFloatRegisters() const { #ifdef TARGET_X86 return false; #else return isValidFloatArgReg(GetRegNum()); #endif } // Can we replace the struct type of this node with a primitive type for argument passing? bool TryPassAsPrimitive() const { return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE)); } #if defined(DEBUG_ARG_SLOTS) // Returns the number of "slots" used, where for this purpose a // register counts as a slot. unsigned getSlotCount() const { if (isBackFilled) { assert(isPassedInRegisters()); assert(numRegs == 1); } else if (GetRegNum() == REG_STK) { assert(!isPassedInRegisters()); assert(numRegs == 0); } else { assert(numRegs > 0); } return numSlots + numRegs; } #endif #if defined(DEBUG_ARG_SLOTS) // Returns the size as a multiple of pointer-size. // For targets without HFAs, this is the same as getSlotCount(). unsigned getSize() const { unsigned size = getSlotCount(); if (GlobalJitOptions::compFeatureHfa) { if (IsHfaRegArg()) { #ifdef TARGET_ARM // We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size. if (GetHfaType() == TYP_DOUBLE) { assert(!IsSplit()); size <<= 1; } #elif defined(TARGET_ARM64) // We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size, // or if they are SIMD16 vector hfa regs we have to double the size. if (GetHfaType() == TYP_FLOAT) { // Round up in case of odd HFA count. size = (size + 1) >> 1; } #ifdef FEATURE_SIMD else if (GetHfaType() == TYP_SIMD16) { size <<= 1; } #endif // FEATURE_SIMD #endif // TARGET_ARM64 } } return size; } #endif // DEBUG_ARG_SLOTS private: unsigned m_byteOffset; // byte size that this argument takes including the padding after. // For example, 1-byte arg on x64 with 8-byte alignment // will have `m_byteSize == 8`, the same arg on apple arm64 will have `m_byteSize == 1`. unsigned m_byteSize; unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers). public: void SetByteOffset(unsigned byteOffset) { DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum); m_byteOffset = byteOffset; } unsigned GetByteOffset() const { DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum); return m_byteOffset; } void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa) { unsigned roundedByteSize; if (compMacOsArm64Abi()) { // Only struct types need extension or rounding to pointer size, but HFA<float> does not. 
if (isStruct && !isFloatHfa) { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } else { roundedByteSize = byteSize; } } else { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } #if !defined(TARGET_ARM) // Arm32 could have a struct with 8 byte alignment // which rounded size % 8 is not 0. assert(m_byteAlignment != 0); assert(roundedByteSize % m_byteAlignment == 0); #endif // TARGET_ARM #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi() && !isStruct) { assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE); } #endif m_byteSize = roundedByteSize; } unsigned GetByteSize() const { return m_byteSize; } void SetByteAlignment(unsigned byteAlignment) { m_byteAlignment = byteAlignment; } unsigned GetByteAlignment() const { return m_byteAlignment; } // Set the register numbers for a multireg argument. // There's nothing to do on x64/Ux because the structDesc has already been used to set the // register numbers. void SetMultiRegNums() { #if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) if (numRegs == 1) { return; } regNumber argReg = GetRegNum(0); #ifdef TARGET_ARM unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1; #else unsigned int regSize = 1; #endif if (numRegs > MAX_ARG_REG_COUNT) NO_WAY("Multireg argument exceeds the maximum length"); for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++) { argReg = (regNumber)(argReg + regSize); setRegNum(regIndex, argReg); } #endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) } #ifdef DEBUG // Check that the value of 'isStruct' is consistent. // A struct arg must be one of the following: // - A node of struct type, // - A GT_FIELD_LIST, or // - A node of a scalar type, passed in a single register or slot // (or two slots in the case of a struct pass on the stack as TYP_DOUBLE). // void checkIsStruct() const { GenTree* node = GetNode(); if (isStruct) { if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST)) { // This is the case where we are passing a struct as a primitive type. // On most targets, this is always a single register or slot. // However, on ARM this could be two slots if it is TYP_DOUBLE. bool isPassedAsPrimitiveType = ((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE))); #ifdef TARGET_ARM if (!isPassedAsPrimitiveType) { if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2)) { isPassedAsPrimitiveType = true; } } #endif // TARGET_ARM assert(isPassedAsPrimitiveType); } } else { assert(!varTypeIsStruct(node)); } } void Dump() const; #endif }; //------------------------------------------------------------------------- // // The class fgArgInfo is used to handle the arguments // when morphing a GT_CALL node. // class fgArgInfo { Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo unsigned argCount; // Updatable arg count value #if defined(DEBUG_ARG_SLOTS) unsigned nextSlotNum; // Updatable slot count value #endif unsigned nextStackByteOffset; unsigned stkLevel; // Stack depth when we make this call (for x86) #if defined(UNIX_X86_ABI) bool alignmentDone; // Updateable flag, set to 'true' after we've done any required alignment. unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs(). unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call. 
// Computed dynamically during codegen, based on stkSizeBytes and the current // stack level (genStackLevel) when the first stack adjustment is made for // this call. #endif #if FEATURE_FIXED_OUT_ARGS unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL #endif unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs) bool hasRegArgs; // true if we have one or more register arguments bool hasStackArgs; // true if we have one or more stack arguments bool argsComplete; // marker for state bool argsSorted; // marker for state bool needsTemps; // one or more arguments must be copied to a temp by EvalArgsToTemps fgArgTabEntry** argTable; // variable sized array of per argument descrption: (i.e. argTable[argTableSize]) private: void AddArg(fgArgTabEntry* curArgTabEntry); public: fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount); fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall); fgArgTabEntry* AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg = false); #ifdef UNIX_AMD64_ABI fgArgTabEntry* AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, const bool isStruct, const bool isFloatHfa, const bool isVararg, const regNumber otherRegNum, const unsigned structIntRegs, const unsigned structFloatRegs, const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr); #endif // UNIX_AMD64_ABI fgArgTabEntry* AddStkArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, unsigned numSlots, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg = false); void RemorphReset(); void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing); void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing); void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots); void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode); void ArgsComplete(); void SortArgs(); void EvalArgsToTemps(); unsigned ArgCount() const { return argCount; } fgArgTabEntry** ArgTable() const { return argTable; } #if defined(DEBUG_ARG_SLOTS) unsigned GetNextSlotNum() const { return nextSlotNum; } #endif unsigned GetNextSlotByteOffset() const { return nextStackByteOffset; } bool HasRegArgs() const { return hasRegArgs; } bool NeedsTemps() const { return needsTemps; } bool HasStackArgs() const { return hasStackArgs; } bool AreArgsComplete() const { return argsComplete; } #if FEATURE_FIXED_OUT_ARGS unsigned GetOutArgSize() const { return outArgSize; } void SetOutArgSize(unsigned newVal) { outArgSize = newVal; } #endif // FEATURE_FIXED_OUT_ARGS #if defined(UNIX_X86_ABI) void ComputeStackAlignment(unsigned curStackLevelInBytes) { padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN); } unsigned GetStkAlign() const { return padStkAlign; } void SetStkSizeBytes(unsigned newStkSizeBytes) { stkSizeBytes = newStkSizeBytes; } unsigned GetStkSizeBytes() const { return stkSizeBytes; } bool IsStkAlignmentDone() const { return alignmentDone; } void SetStkAlignmentDone() { alignmentDone = true; } #endif // defined(UNIX_X86_ABI) // Get the fgArgTabEntry for the arg at position argNum. 
fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const { fgArgTabEntry* curArgTabEntry = nullptr; if (!reMorphing) { // The arg table has not yet been sorted. curArgTabEntry = argTable[argNum]; assert(curArgTabEntry->argNum == argNum); return curArgTabEntry; } for (unsigned i = 0; i < argCount; i++) { curArgTabEntry = argTable[i]; if (curArgTabEntry->argNum == argNum) { return curArgTabEntry; } } noway_assert(!"GetArgEntry: argNum not found"); return nullptr; } void SetNeedsTemps() { needsTemps = true; } // Get the node for the arg at position argIndex. // Caller must ensure that this index is a valid arg index. GenTree* GetArgNode(unsigned argIndex) const { return GetArgEntry(argIndex)->GetNode(); } void Dump(Compiler* compiler) const; }; #ifdef DEBUG // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // We have the ability to mark source expressions with "Test Labels." // These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions // that should be CSE defs, and other expressions that should uses of those defs, with a shared label. enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel. { TL_SsaName, TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown). TL_VNNorm, // Like above, but uses the non-exceptional value of the expression. TL_CSE_Def, // This must be identified in the JIT as a CSE def TL_CSE_Use, // This must be identified in the JIT as a CSE use TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop. }; struct TestLabelAndNum { TestLabel m_tl; ssize_t m_num; TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0) { } }; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap; // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // DEBUG //------------------------------------------------------------------------- // LoopFlags: flags for the loop table. // enum LoopFlags : unsigned short { LPFLG_EMPTY = 0, // LPFLG_UNUSED = 0x0001, // LPFLG_UNUSED = 0x0002, LPFLG_ITER = 0x0004, // loop of form: for (i = icon or lclVar; test_condition(); i++) // LPFLG_UNUSED = 0x0008, LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call LPFLG_VAR_INIT = 0x0020, // iterator is initialized with a local var (var # found in lpVarInit) LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit) LPFLG_SIMD_LIMIT = 0x0080, // iterator is compared with vector element count (found in lpConstLimit) LPFLG_VAR_LIMIT = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit) LPFLG_CONST_LIMIT = 0x0200, // iterator is compared with a constant (found in lpConstLimit) LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit) LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop LPFLG_REMOVED = 0x1000, // has been removed from the loop table (unrolled or optimized away) LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet // type are assigned to. 
}; inline constexpr LoopFlags operator~(LoopFlags a) { return (LoopFlags)(~(unsigned short)a); } inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a | (unsigned short)b); } inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a & (unsigned short)b); } inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a | (unsigned short)b); } inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a & (unsigned short)b); } // The following holds information about instr offsets in terms of generated code. enum class IPmappingDscKind { Prolog, // The mapping represents the start of a prolog. Epilog, // The mapping represents the start of an epilog. NoMapping, // This does not map to any IL offset. Normal, // The mapping maps to an IL offset. }; struct IPmappingDsc { emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset IPmappingDscKind ipmdKind; // The kind of mapping ILLocation ipmdLoc; // The location for normal mappings bool ipmdIsLabel; // Can this code be a branch label? }; struct PreciseIPMapping { emitLocation nativeLoc; DebugInfo debugInfo; }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX The big guy. The sections are currently organized as : XX XX XX XX o GenTree and BasicBlock XX XX o LclVarsInfo XX XX o Importer XX XX o FlowGraph XX XX o Optimizer XX XX o RegAlloc XX XX o EEInterface XX XX o TempsInfo XX XX o RegSet XX XX o GCInfo XX XX o Instruction XX XX o ScopeInfo XX XX o PrologScopeInfo XX XX o CodeGenerator XX XX o UnwindInfo XX XX o Compiler XX XX o typeInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ struct HWIntrinsicInfo; class Compiler { friend class emitter; friend class UnwindInfo; friend class UnwindFragmentInfo; friend class UnwindEpilogInfo; friend class JitTimer; friend class LinearScan; friend class fgArgInfo; friend class Rationalizer; friend class Phase; friend class Lowering; friend class CSE_DataFlow; friend class CSE_Heuristic; friend class CodeGenInterface; friend class CodeGen; friend class LclVarDsc; friend class TempDsc; friend class LIR; friend class ObjectAllocator; friend class LocalAddressVisitor; friend struct GenTree; friend class MorphInitBlockHelper; friend class MorphCopyBlockHelper; #ifdef FEATURE_HW_INTRINSICS friend struct HWIntrinsicInfo; #endif // FEATURE_HW_INTRINSICS #ifndef TARGET_64BIT friend class DecomposeLongs; #endif // !TARGET_64BIT /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Misc structs definitions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package. #ifdef DEBUG bool verbose; bool verboseTrees; bool shouldUseVerboseTrees(); bool asciiTrees; // If true, dump trees using only ASCII characters bool shouldDumpASCIITrees(); bool verboseSsa; // If true, produce especially verbose dump output in SSA construction. 
    bool shouldUseVerboseSsa();
    bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id: morphNum)
    int  morphNum;              // This counts the trees that have been morphed, allowing us to label each uniquely.

    bool doExtraSuperPmiQueries;
    void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep.

    const char* VarNameToStr(VarName name)
    {
        return name;
    }

    DWORD expensiveDebugCheckLevel;
#endif

#if FEATURE_MULTIREG_RET
    GenTree* impAssignMultiRegTypeToVar(GenTree*             op,
                                        CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv));
#endif // FEATURE_MULTIREG_RET

#ifdef TARGET_X86
    bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const;
#endif // TARGET_X86

    //-------------------------------------------------------------------------
    // Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64.
    // HFAs are one to four element structs where each element is the same
    // type, either all float or all double. We handle HVAs (one to four elements of
    // vector types) uniformly with HFAs. HFAs are treated specially
    // in the ARM/ARM64 Procedure Call Standards; specifically, they are passed in
    // floating-point registers instead of the general purpose registers.
    //
    bool IsHfa(CORINFO_CLASS_HANDLE hClass);
    bool IsHfa(GenTree* tree);

    var_types GetHfaType(GenTree* tree);
    unsigned  GetHfaCount(GenTree* tree);

    var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
    unsigned  GetHfaCount(CORINFO_CLASS_HANDLE hClass);

    bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv);

    //-------------------------------------------------------------------------
    // The following is used for validating the format of the EH table
    //

    struct EHNodeDsc;
    typedef struct EHNodeDsc* pEHNodeDsc;

    EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
    EHNodeDsc* ehnNext; // root of the tree comprising the EHnodes.

    struct EHNodeDsc
    {
        enum EHBlockType
        {
            TryNode,
            FilterNode,
            HandlerNode,
            FinallyNode,
            FaultNode
        };

        EHBlockType ehnBlockType;   // kind of EH block
        IL_OFFSET   ehnStartOffset; // IL offset of start of the EH block
        IL_OFFSET   ehnEndOffset;   // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to
                                    // the last IL offset, not "one past the last one", i.e., the range Start to End is
                                    // inclusive).
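        // For example, a try region nested inside an outer try is reached from the outer TryNode via
        // ehnChild (its leftmost nested block) below, siblings at the same nesting level are chained
        // through ehnNext, and a TryNode finds its handler through ehnHandlerNode.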
pEHNodeDsc ehnNext; // next (non-nested) block in sequential order pEHNodeDsc ehnChild; // leftmost nested block union { pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node }; pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0 pEHNodeDsc ehnEquivalent; // if blockType=tryNode, start offset and end offset is same, void ehnSetTryNodeType() { ehnBlockType = TryNode; } void ehnSetFilterNodeType() { ehnBlockType = FilterNode; } void ehnSetHandlerNodeType() { ehnBlockType = HandlerNode; } void ehnSetFinallyNodeType() { ehnBlockType = FinallyNode; } void ehnSetFaultNodeType() { ehnBlockType = FaultNode; } bool ehnIsTryBlock() { return ehnBlockType == TryNode; } bool ehnIsFilterBlock() { return ehnBlockType == FilterNode; } bool ehnIsHandlerBlock() { return ehnBlockType == HandlerNode; } bool ehnIsFinallyBlock() { return ehnBlockType == FinallyNode; } bool ehnIsFaultBlock() { return ehnBlockType == FaultNode; } // returns true if there is any overlap between the two nodes static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2) { if (node1->ehnStartOffset < node2->ehnStartOffset) { return (node1->ehnEndOffset >= node2->ehnStartOffset); } else { return (node1->ehnStartOffset <= node2->ehnEndOffset); } } // fails with BADCODE if inner is not completely nested inside outer static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer) { return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset)); } }; //------------------------------------------------------------------------- // Exception handling functions // #if !defined(FEATURE_EH_FUNCLETS) bool ehNeedsShadowSPslots() { return (info.compXcptnsCount || opts.compDbgEnC); } // 0 for methods with no EH // 1 for methods with non-nested EH, or where only the try blocks are nested // 2 for a method with a catch within a catch // etc. unsigned ehMaxHndNestingCount; #endif // !FEATURE_EH_FUNCLETS static bool jitIsBetween(unsigned value, unsigned start, unsigned end); static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end); bool bbInCatchHandlerILRange(BasicBlock* blk); bool bbInFilterILRange(BasicBlock* blk); bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk); bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk); bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk); bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk); unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo); unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex); unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex); // Returns true if "block" is the start of a try region. bool bbIsTryBeg(BasicBlock* block); // Returns true if "block" is the start of a handler or filter region. bool bbIsHandlerBeg(BasicBlock* block); // Returns true iff "block" is where control flows if an exception is raised in the // try region, and sets "*regionIndex" to the index of the try for the handler. // Differs from "IsHandlerBeg" in the case of filters, where this is true for the first // block of the filter, but not for the filter's handler. bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex); bool ehHasCallableHandlers(); // Return the EH descriptor for the given region index. EHblkDsc* ehGetDsc(unsigned regionIndex); // Return the EH index given a region descriptor. 
unsigned ehGetIndex(EHblkDsc* ehDsc); // Return the EH descriptor index of the enclosing try, for the given region index. unsigned ehGetEnclosingTryIndex(unsigned regionIndex); // Return the EH descriptor index of the enclosing handler, for the given region index. unsigned ehGetEnclosingHndIndex(unsigned regionIndex); // Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this // block is not in a 'try' region). EHblkDsc* ehGetBlockTryDsc(BasicBlock* block); // Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr // if this block is not in a filter or handler region). EHblkDsc* ehGetBlockHndDsc(BasicBlock* block); // Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or // nullptr if this block's exceptions propagate to caller). EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block); EHblkDsc* ehIsBlockTryLast(BasicBlock* block); EHblkDsc* ehIsBlockHndLast(BasicBlock* block); bool ehIsBlockEHLast(BasicBlock* block); bool ehBlockHasExnFlowDsc(BasicBlock* block); // Return the region index of the most nested EH region this block is in. unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion); // Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check. unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex); // Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX // if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' // is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler. // (It can never be a filter.) unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion); // A block has been deleted. Update the EH table appropriately. void ehUpdateForDeletedBlock(BasicBlock* block); // Determine whether a block can be deleted while preserving the EH normalization rules. bool ehCanDeleteEmptyBlock(BasicBlock* block); // Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region. void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast); // For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler, // or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index // is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the // BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function // body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the // BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if lives in the handler region. (It never // lives in a filter.) unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion); // Find the range of basic blocks in which all BBJ_CALLFINALLY will be found that target the 'finallyIndex' region's // handler. Set begBlk to the first block, and endBlk to the block after the last block of the range // (nullptr if the last block is the last block in the program). // Precondition: 'finallyIndex' is the EH region of a try/finally clause. 
void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk); #ifdef DEBUG // Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return // 'true' if the BBJ_CALLFINALLY is in the correct EH region. bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex); #endif // DEBUG #if defined(FEATURE_EH_FUNCLETS) // Do we need a PSPSym in the main function? For codegen purposes, we only need one // if there is a filter that protects a region with a nested EH clause (such as a // try/catch nested in the 'try' body of a try/filter/filter-handler). See // genFuncletProlog() for more details. However, the VM seems to use it for more // purposes, maybe including debugging. Until we are sure otherwise, always create // a PSPSym for functions with any EH. bool ehNeedsPSPSym() const { #ifdef TARGET_X86 return false; #else // TARGET_X86 return compHndBBtabCount > 0; #endif // TARGET_X86 } bool ehAnyFunclets(); // Are there any funclets in this function? unsigned ehFuncletCount(); // Return the count of funclets in the function unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks #else // !FEATURE_EH_FUNCLETS bool ehAnyFunclets() { return false; } unsigned ehFuncletCount() { return 0; } unsigned bbThrowIndex(BasicBlock* blk) { return blk->bbTryIndex; } // Get the index to use as the cache key for sharing throw blocks #endif // !FEATURE_EH_FUNCLETS // Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of // "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the first // first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor, // for example, we want to consider that the immediate dominator of the catch clause start block, so it's // convenient to also consider it a predecessor.) flowList* BlockPredsWithEH(BasicBlock* blk); // This table is useful for memoization of the method above. typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap; BlockToFlowListMap* m_blockToEHPreds; BlockToFlowListMap* GetBlockToEHPreds() { if (m_blockToEHPreds == nullptr) { m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator()); } return m_blockToEHPreds; } void* ehEmitCookie(BasicBlock* block); UNATIVE_OFFSET ehCodeOffset(BasicBlock* block); EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter); EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd); EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter); EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast); void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg); void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast); void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast); void fgSkipRmvdBlocks(EHblkDsc* handlerTab); void fgAllocEHTable(); void fgRemoveEHTableEntry(unsigned XTnum); #if defined(FEATURE_EH_FUNCLETS) EHblkDsc* fgAddEHTableEntry(unsigned XTnum); #endif // FEATURE_EH_FUNCLETS #if !FEATURE_EH void fgRemoveEH(); #endif // !FEATURE_EH void fgSortEHTable(); // Causes the EH table to obey some well-formedness conditions, by inserting // empty BB's when necessary: // * No block is both the first block of a handler and the first block of a try. 
// * No block is the first block of multiple 'try' regions. // * No block is the last block of multiple EH regions. void fgNormalizeEH(); bool fgNormalizeEHCase1(); bool fgNormalizeEHCase2(); bool fgNormalizeEHCase3(); void fgCheckForLoopsInHandlers(); #ifdef DEBUG void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void fgVerifyHandlerTab(); void fgDispHandlerTab(); #endif // DEBUG bool fgNeedToSortEHTable; void verInitEHTree(unsigned numEHClauses); void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab); void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node); void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node); void verCheckNestingLevel(EHNodeDsc* initRoot); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GenTree and BasicBlock XX XX XX XX Functions to allocate and display the GenTrees and BasicBlocks XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Functions to create nodes Statement* gtNewStmt(GenTree* expr = nullptr); Statement* gtNewStmt(GenTree* expr, const DebugInfo& di); // For unary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE); // For binary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2); GenTreeColon* gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode); GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon); GenTree* gtNewLargeOperNode(genTreeOps oper, var_types type = TYP_I_IMPL, GenTree* op1 = nullptr, GenTree* op2 = nullptr); GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT); GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq); GenTreeIntCon* gtNewNull(); GenTreeIntCon* gtNewTrue(); GenTreeIntCon* gtNewFalse(); GenTree* gtNewPhysRegNode(regNumber reg, var_types type); GenTree* gtNewJmpTableNode(); GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant); GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr); GenTreeFlags gtTokenToIconFlags(unsigned token); GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle); GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd); GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd); GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd); GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd); GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue); GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node); GenTree* gtNewLconNode(__int64 value); GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE); GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle); GenTree* gtNewZeroConNode(var_types type); GenTree* gtNewOneConNode(var_types type); GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src); #ifdef FEATURE_SIMD GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize); #endif GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock); GenTree* gtNewPutArgReg(var_types type, GenTree* arg, 
regNumber argReg); GenTree* gtNewBitCastNode(var_types type, GenTree* arg); protected: void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile); public: GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); void gtSetObjGcInfo(GenTreeObj* objNode); GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); GenTree* gtNewBlockVal(GenTree* addr, unsigned size); GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile); GenTreeCall::Use* gtNewCallArgs(GenTree* node); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4); GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args); GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after); GenTreeCall* gtNewCallNode(gtCallTypes callType, CORINFO_METHOD_HANDLE handle, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr); GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup, GenTree* ctxTree, void* compileTimeHandle); GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL); GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type = TYP_I_IMPL); #ifdef FEATURE_SIMD GenTreeSIMD* gtNewSIMDNode( var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); GenTreeSIMD* gtNewSIMDNode(var_types type, GenTree* op1, GenTree* op2, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); void SetOpLclRelatedToSIMDIntrinsic(GenTree* op); #endif #ifdef FEATURE_HW_INTRINSICS GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, GenTree* op4, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree** operands, size_t operandCount, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* 
gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode( var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTree* gtNewSimdAbsNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdBinOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCeilNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAllNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCndSelNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCreateBroadcastNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdDotProdNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdFloorNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdGetElementNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMaxNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMinNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdNarrowNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSqrtNode( 
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSumNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdUnOpNode(genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenLowerNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenUpperNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWithElementNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdZeroNode(var_types type, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode( var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID); CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType); CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType); #endif // FEATURE_HW_INTRINSICS GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset); GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags); GenTreeField* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0); GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp); GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block); GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr); GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock); var_types gtTypeForNullCheck(GenTree* tree); void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block); static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum); static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node); fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx); static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx); GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src); GenTree* gtNewTempAssign(unsigned tmp, GenTree* val, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* gtNewRefCOMfield(GenTree* objPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp, CORINFO_CLASS_HANDLE structType, GenTree* assg); GenTree* gtNewNothingNode(); GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTree* gtUnusedValNode(GenTree* expr); GenTree* gtNewKeepAliveNode(GenTree* op); GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType); GenTreeCast* gtNewCastNodeL(var_types typ, 
GenTree* op1, bool fromUnsigned, var_types castType); GenTreeAllocObj* gtNewAllocObjNode( unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1); GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent); GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree); GenTreeIndir* gtNewMethodTableLookup(GenTree* obj); //------------------------------------------------------------------------ // Other GenTree functions GenTree* gtClone(GenTree* tree, bool complexOK = false); // If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise, // create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with // IntCnses with value `deepVarVal`. GenTree* gtCloneExpr( GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal); // Create a copy of `tree`, optionally adding specifed flags, and optionally mapping uses of local // `varNum` to int constants with value `varVal`. GenTree* gtCloneExpr(GenTree* tree, GenTreeFlags addFlags = GTF_EMPTY, unsigned varNum = BAD_VAR_NUM, int varVal = 0) { return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal); } Statement* gtCloneStmt(Statement* stmt) { GenTree* exprClone = gtCloneExpr(stmt->GetRootNode()); return gtNewStmt(exprClone, stmt->GetDebugInfo()); } // Internal helper for cloning a call GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call, GenTreeFlags addFlags = GTF_EMPTY, unsigned deepVarNum = BAD_VAR_NUM, int deepVarVal = 0); // Create copy of an inline or guarded devirtualization candidate tree. GenTreeCall* gtCloneCandidateCall(GenTreeCall* call); void gtUpdateSideEffects(Statement* stmt, GenTree* tree); void gtUpdateTreeAncestorsSideEffects(GenTree* tree); void gtUpdateStmtSideEffects(Statement* stmt); void gtUpdateNodeSideEffects(GenTree* tree); void gtUpdateNodeOperSideEffects(GenTree* tree); void gtUpdateNodeOperSideEffectsPost(GenTree* tree); // Returns "true" iff the complexity (not formally defined, but first interpretation // is #of nodes in subtree) of "tree" is greater than "limit". // (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used // before they have been set.) bool gtComplexityExceeds(GenTree** tree, unsigned limit); GenTree* gtReverseCond(GenTree* tree); static bool gtHasRef(GenTree* tree, ssize_t lclNum); bool gtHasLocalsWithAddrOp(GenTree* tree); unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz); unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp); void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly); #ifdef DEBUG unsigned gtHashValue(GenTree* tree); GenTree* gtWalkOpEffectiveVal(GenTree* op); #endif void gtPrepareCost(GenTree* tree); bool gtIsLikelyRegVar(GenTree* tree); // Returns true iff the secondNode can be swapped with firstNode. bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode); // Given an address expression, compute its costs and addressing mode opportunities, // and mark addressing mode candidates as GTF_DONT_CSE. // TODO-Throughput - Consider actually instantiating these early, to avoid // having to re-run the algorithm that looks for them (might also improve CQ). 
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type); unsigned gtSetEvalOrder(GenTree* tree); void gtSetStmtInfo(Statement* stmt); // Returns "true" iff "node" has any of the side effects in "flags". bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags); // Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags". bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags); // Appends 'expr' in front of 'list' // 'list' will typically start off as 'nullptr' // when 'list' is non-null a GT_COMMA node is used to insert 'expr' GenTree* gtBuildCommaList(GenTree* list, GenTree* expr); void gtExtractSideEffList(GenTree* expr, GenTree** pList, GenTreeFlags GenTreeFlags = GTF_SIDE_EFFECT, bool ignoreRoot = false); GenTree* gtGetThisArg(GenTreeCall* call); // Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the // static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but // complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing // the given "fldHnd", is such an object pointer. bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd); // Return true if call is a recursive call; return false otherwise. // Note when inlining, this looks for calls back to the root method. bool gtIsRecursiveCall(GenTreeCall* call) { return gtIsRecursiveCall(call->gtCallMethHnd); } bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle) { return (callMethodHandle == impInlineRoot()->info.compMethodHnd); } //------------------------------------------------------------------------- GenTree* gtFoldExpr(GenTree* tree); GenTree* gtFoldExprConst(GenTree* tree); GenTree* gtFoldExprSpecial(GenTree* tree); GenTree* gtFoldBoxNullable(GenTree* tree); GenTree* gtFoldExprCompare(GenTree* tree); GenTree* gtCreateHandleCompare(genTreeOps oper, GenTree* op1, GenTree* op2, CorInfoInlineTypeCheck typeCheckInliningResult); GenTree* gtFoldExprCall(GenTreeCall* call); GenTree* gtFoldTypeCompare(GenTree* tree); GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2); // Options to control behavior of gtTryRemoveBoxUpstreamEffects enum BoxRemovalOptions { BR_REMOVE_AND_NARROW, // remove effects, minimize remaining work, return possibly narrowed source tree BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree BR_REMOVE_BUT_NOT_NARROW, // remove effects, return original source tree BR_DONT_REMOVE, // check if removal is possible, return copy source tree BR_DONT_REMOVE_WANT_TYPE_HANDLE, // check if removal is possible, return type handle tree BR_MAKE_LOCAL_COPY // revise box to copy to temp local and return local's address }; GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW); GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp); //------------------------------------------------------------------------- // Get the handle, if any. CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree); // Get the handle, and assert if not found. CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree); // Get the handle for a ref type. 
CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull); // Get the class handle for an helper call CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull); // Get the element handle for an array of ref type. CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array); // Get a class handle from a helper call argument CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array); // Get the class handle for a field CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull); // Check if this tree is a gc static base helper call bool gtIsStaticGCBaseHelperCall(GenTree* tree); //------------------------------------------------------------------------- // Functions to display the trees #ifdef DEBUG void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR); void gtDispConst(GenTree* tree); void gtDispLeaf(GenTree* tree, IndentStack* indentStack); void gtDispNodeName(GenTree* tree); #if FEATURE_MULTIREG_RET unsigned gtDispMultiRegCount(GenTree* tree); #endif void gtDispRegVal(GenTree* tree); void gtDispZeroFieldSeq(GenTree* tree); void gtDispVN(GenTree* tree); void gtDispCommonEndLine(GenTree* tree); enum IndentInfo { IINone, IIArc, IIArcTop, IIArcBottom, IIEmbedded, IIError, IndentInfoCount }; void gtDispChild(GenTree* child, IndentStack* indentStack, IndentInfo arcType, _In_opt_ const char* msg = nullptr, bool topOnly = false); void gtDispTree(GenTree* tree, IndentStack* indentStack = nullptr, _In_opt_ const char* msg = nullptr, bool topOnly = false, bool isLIR = false); void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut); int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining); char* gtGetLclVarName(unsigned lclNum); void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true); void gtDispLclVarStructType(unsigned lclNum); void gtDispClassLayout(ClassLayout* layout, var_types type); void gtDispILLocation(const ILLocation& loc); void gtDispStmt(Statement* stmt, const char* msg = nullptr); void gtDispBlockStmts(BasicBlock* block); void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength); void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength); void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack); void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq); void gtDispFieldSeq(FieldSeqNode* pfsn); void gtDispRange(LIR::ReadOnlyRange const& range); void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree); void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr); #endif // For tree walks enum fgWalkResult { WALK_CONTINUE, WALK_SKIP_SUBTREES, WALK_ABORT }; struct fgWalkData; typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data); typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data); static fgWalkPreFn gtMarkColonCond; static fgWalkPreFn gtClearColonCond; struct FindLinkData { GenTree* nodeToFind; GenTree** result; GenTree* parent; }; FindLinkData gtFindLink(Statement* stmt, GenTree* node); bool gtHasCatchArg(GenTree* tree); typedef ArrayStack<GenTree*> GenTreeStack; static bool gtHasCallOnStack(GenTreeStack* parentStack); //========================================================================= // BasicBlock functions #ifdef DEBUG // This is a debug flag we will use to assert when creating block 
during codegen
    // as this interferes with procedure splitting. If you know what you're doing, set
    // it to true before creating the block. (DEBUG only)
    bool fgSafeBasicBlockCreation;
#endif

    BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind);

    void placeLoopAlignInstructions();

    /*
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XX                                                                           XX
    XX                           LclVarsInfo                                     XX
    XX                                                                           XX
    XX   The variables to be used by the code generator.                         XX
    XX                                                                           XX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    */

    //
    // For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will
    // be placed in the stack frame and its fields must be laid out sequentially.
    //
    // For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by
    // a local variable that can be enregistered or placed in the stack frame.
    // The fields do not need to be laid out sequentially.
    //
    enum lvaPromotionType
    {
        PROMOTION_TYPE_NONE,        // The struct local is not promoted
        PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted,
                                    // and its field locals are independent of its parent struct local.
        PROMOTION_TYPE_DEPENDENT    // The struct local is promoted,
                                    // but its field locals depend on its parent struct local.
    };

    /*****************************************************************************/

    enum FrameLayoutState
    {
        NO_FRAME_LAYOUT,
        INITIAL_FRAME_LAYOUT,
        PRE_REGALLOC_FRAME_LAYOUT,
        REGALLOC_FRAME_LAYOUT,
        TENTATIVE_FRAME_LAYOUT,
        FINAL_FRAME_LAYOUT
    };

public:
    RefCountState lvaRefCountState; // Current local ref count state

    bool lvaLocalVarRefCounted() const
    {
        return lvaRefCountState == RCS_NORMAL;
    }

    bool lvaTrackedFixed; // true: We cannot add new 'tracked' variables

    unsigned lvaCount; // total number of locals, which includes function arguments,
                       // special arguments, IL local variables, and JIT temporary variables

    LclVarDsc* lvaTable;    // variable descriptor table
    unsigned   lvaTableCnt; // lvaTable size (>= lvaCount)

    unsigned lvaTrackedCount;             // actual # of locals being tracked
    unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being
                                          // tracked

#ifdef DEBUG
    VARSET_TP lvaTrackedVars; // set of tracked variables
#endif
#ifndef TARGET_64BIT
    VARSET_TP lvaLongVars; // set of long (64-bit) variables
#endif
    VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables

    unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices.
                          // If that changes, this changes. VarSets from different epochs
                          // cannot be meaningfully combined.

    unsigned GetCurLVEpoch()
    {
        return lvaCurEpoch;
    }

    // reverse map of tracked number to var number
    unsigned  lvaTrackedToVarNumSize;
    unsigned* lvaTrackedToVarNum;

#if DOUBLE_ALIGN
#ifdef DEBUG
    // # of procs compiled with a double-aligned stack
    static unsigned s_lvaDoubleAlignedProcsCount;
#endif
#endif

    // Getters and setters for address-exposed and do-not-enregister local var properties.
    bool lvaVarAddrExposed(unsigned varNum) const;
    void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason));
    void lvaSetVarLiveInOutOfHandler(unsigned varNum);
    bool lvaVarDoNotEnregister(unsigned varNum);

    void lvSetMinOptsDoNotEnreg();

    bool lvaEnregEHVars;
    bool lvaEnregMultiRegVars;

    void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason));

    unsigned lvaVarargsHandleArg;
#ifdef TARGET_X86
    unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack
                                      // arguments
#endif                                // TARGET_X86

    unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame
    unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame
#if FEATURE_FIXED_OUT_ARGS
    unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining.
#endif
    unsigned lvaMonAcquired; // boolean variable introduced in synchronized methods
                             // that tracks whether the lock has been taken

    unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg.
                         // However, if there is a "ldarga 0" or "starg 0" in the IL,
                         // we will redirect all "ldarg(a) 0" and "starg 0" to this temp.

    unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression
                                        // in case there are multiple BBJ_RETURN blocks in the inlinee
                                        // or if the inlinee has GC ref locals.

#if FEATURE_FIXED_OUT_ARGS
    unsigned            lvaOutgoingArgSpaceVar;  // dummy TYP_LCLBLK var for fixed outgoing argument space
    PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space
#endif                                           // FEATURE_FIXED_OUT_ARGS

    static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding)
    {
        return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE);
    }

    // Variable representing the return address. The helper-based tailcall
    // mechanism passes the address of the return address to a runtime helper
    // where it is used to detect tail-call chains.
    unsigned lvaRetAddrVar;

#if defined(DEBUG) && defined(TARGET_XARCH)
    unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return.
#endif // defined(DEBUG) && defined(TARGET_XARCH)

#if defined(DEBUG) && defined(TARGET_X86)
    unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call.
#endif // defined(DEBUG) && defined(TARGET_X86)

    bool lvaGenericsContextInUse;

    bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or
                                      // CORINFO_GENERICS_CTXT_FROM_THIS?
    bool lvaReportParamTypeArg();     // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG?

    //-------------------------------------------------------------------------
    // All these frame offsets are inter-related and must be kept in sync

#if !defined(FEATURE_EH_FUNCLETS)
    // This is used for the callable handlers
    unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots
#endif                            // FEATURE_EH_FUNCLETS

    int lvaCachedGenericContextArgOffs;
    int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as
                                            // THIS pointer

#ifdef JIT32_GCENCODER
    unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the last alloca/localloc
#endif // JIT32_GCENCODER

    unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper

    // TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps.
// after the reg predict we will use a computed maxTmpSize // which is based upon the number of spill temps predicted by reg predict // All this is necessary because if we under-estimate the size of the spill // temps we could fail when encoding instructions that reference stack offsets for ARM. // // Pre codegen max spill temp size. static const unsigned MAX_SPILL_TEMP_SIZE = 24; //------------------------------------------------------------------------- unsigned lvaGetMaxSpillTempSize(); #ifdef TARGET_ARM bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask); #endif // TARGET_ARM void lvaAssignFrameOffsets(FrameLayoutState curState); void lvaFixVirtualFrameOffsets(); void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc); void lvaUpdateArgsWithInitialReg(); void lvaAssignVirtualFrameOffsetsToArgs(); #ifdef UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset); #else // !UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs); #endif // !UNIX_AMD64_ABI void lvaAssignVirtualFrameOffsetsToLocals(); int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs); #ifdef TARGET_AMD64 // Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even. bool lvaIsCalleeSavedIntRegCountEven(); #endif void lvaAlignFrame(); void lvaAssignFrameOffsetsToPromotedStructs(); int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign); #ifdef DEBUG void lvaDumpRegLocation(unsigned lclNum); void lvaDumpFrameLocation(unsigned lclNum); void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6); void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame // layout state defined by lvaDoneFrameLayout #endif // Limit frames size to 1GB. The maximum is 2GB in theory - make it intentionally smaller // to avoid bugs from borderline cases. #define MAX_FrameSize 0x3FFFFFFF void lvaIncrementFrameSize(unsigned size); unsigned lvaFrameSize(FrameLayoutState curState); // Returns the caller-SP-relative offset for the SP/FP relative offset determined by FP based. int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const; // Returns the caller-SP-relative offset for the local variable "varNum." int lvaGetCallerSPRelativeOffset(unsigned varNum); // Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc. int lvaGetSPRelativeOffset(unsigned varNum); int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased); int lvaGetInitialSPRelativeOffset(unsigned varNum); // True if this is an OSR compilation and this local is potentially // located on the original method stack frame. 
bool lvaIsOSRLocal(unsigned varNum); //------------------------ For splitting types ---------------------------- void lvaInitTypeRef(); void lvaInitArgs(InitVarDscInfo* varDscInfo); void lvaInitThisPtr(InitVarDscInfo* varDscInfo); void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg); void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs); void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo); void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo); void lvaInitVarDsc(LclVarDsc* varDsc, unsigned varNum, CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd, CORINFO_ARG_LIST_HANDLE varList, CORINFO_SIG_INFO* varSig); static unsigned lvaTypeRefMask(var_types type); var_types lvaGetActualType(unsigned lclNum); var_types lvaGetRealType(unsigned lclNum); //------------------------------------------------------------------------- void lvaInit(); LclVarDsc* lvaGetDesc(unsigned lclNum) { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(unsigned lclNum) const { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar) { return lvaGetDesc(lclVar->GetLclNum()); } unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex) { assert(trackedIndex < lvaTrackedCount); unsigned lclNum = lvaTrackedToVarNum[trackedIndex]; assert(lclNum < lvaCount); return lclNum; } LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex) { return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex)); } unsigned lvaGetLclNum(const LclVarDsc* varDsc) { assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) == 0); // varDsc better not point in the middle of a variable unsigned varNum = (unsigned)(varDsc - lvaTable); assert(varDsc == &lvaTable[varNum]); return varNum; } unsigned lvaLclSize(unsigned varNum); unsigned lvaLclExactSize(unsigned varNum); bool lvaHaveManyLocals() const; unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason)); unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason)); unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason)); void lvaSortByRefCount(); void lvaMarkLocalVars(); // Local variable ref-counting void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers); void lvaMarkLocalVars(BasicBlock* block, bool isRecompute); void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt); #ifdef DEBUG struct lvaStressLclFldArgs { Compiler* m_pCompiler; bool m_bFirstPass; }; static fgWalkPreFn lvaStressLclFldCB; void lvaStressLclFld(); void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars); void lvaDispVarSet(VARSET_VALARG_TP set); #endif #ifdef TARGET_ARM int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage); #else int lvaFrameAddress(int varNum, bool* pFPbased); #endif bool lvaIsParameter(unsigned varNum); bool lvaIsRegArgument(unsigned varNum); bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument? bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code // that writes to arg0 // For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference. // For ARM64, this is structs larger than 16 bytes that are passed by reference. 
    bool lvaIsImplicitByRefLocal(unsigned varNum)
    {
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
        LclVarDsc* varDsc = lvaGetDesc(varNum);
        if (varDsc->lvIsImplicitByRef)
        {
            assert(varDsc->lvIsParam);

            assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF));
            return true;
        }
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
        return false;
    }

    // Returns true if this local var is a multireg struct
    bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg);

    // If the local is a TYP_STRUCT, get/set a class handle describing it
    CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum);
    void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true);
    void lvaSetStructUsedAsVarArg(unsigned varNum);

    // If the local is TYP_REF, set or update the associated class information.
    void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
    void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);
    void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
    void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);

#define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct

    // Info about struct type fields.
    struct lvaStructFieldInfo
    {
        CORINFO_FIELD_HANDLE fldHnd;
        unsigned char        fldOffset;
        unsigned char        fldOrdinal;
        var_types            fldType;
        unsigned             fldSize;
        CORINFO_CLASS_HANDLE fldTypeHnd;

        lvaStructFieldInfo()
            : fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr)
        {
        }
    };

    // Info about a struct type, instances of which may be candidates for promotion.
    struct lvaStructPromotionInfo
    {
        CORINFO_CLASS_HANDLE typeHnd;
        bool                 canPromote;
        bool                 containsHoles;
        bool                 customLayout;
        bool                 fieldsSorted;
        unsigned char        fieldCnt;
        lvaStructFieldInfo   fields[MAX_NumOfFieldsInPromotableStruct];

        lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr)
            : typeHnd(typeHnd)
            , canPromote(false)
            , containsHoles(false)
            , customLayout(false)
            , fieldsSorted(false)
            , fieldCnt(0)
        {
        }
    };

    struct lvaFieldOffsetCmp
    {
        bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2);
    };

    // This class is responsible for checking validity and profitability of struct promotion.
    // If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes
    // necessary information for fgMorphStructField to use.
class StructPromotionHelper { public: StructPromotionHelper(Compiler* compiler); bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd); bool TryPromoteStructVar(unsigned lclNum); void Clear() { structPromotionInfo.typeHnd = NO_CLASS_HANDLE; } #ifdef DEBUG void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType); #endif // DEBUG private: bool CanPromoteStructVar(unsigned lclNum); bool ShouldPromoteStructVar(unsigned lclNum); void PromoteStructVar(unsigned lclNum); void SortStructFields(); lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal); bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo); private: Compiler* compiler; lvaStructPromotionInfo structPromotionInfo; #ifdef DEBUG typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types> RetypedAsScalarFieldsMap; RetypedAsScalarFieldsMap retypedFieldsMap; #endif // DEBUG }; StructPromotionHelper* structPromotionHelper; unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset); lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetPromotionType(unsigned varNum); lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetParentPromotionType(unsigned varNum); bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc); bool lvaIsGCTracked(const LclVarDsc* varDsc); #if defined(FEATURE_SIMD) bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc) { assert(varDsc->lvType == TYP_SIMD12); assert(varDsc->lvExactSize == 12); #if defined(TARGET_64BIT) assert(compMacOsArm64Abi() || varDsc->lvSize() == 16); #endif // defined(TARGET_64BIT) // We make local variable SIMD12 types 16 bytes instead of just 12. // lvSize() will return 16 bytes for SIMD12, even for fields. // However, we can't do that mapping if the var is a dependently promoted struct field. // Such a field must remain its exact size within its parent struct unless it is a single // field *and* it is the only field in a struct of 16 bytes. if (varDsc->lvSize() != 16) { return false; } if (lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl); return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16); } return true; } #endif // defined(FEATURE_SIMD) unsigned lvaGSSecurityCookie; // LclVar number bool lvaTempsHaveLargerOffsetThanVars(); // Returns "true" iff local variable "lclNum" is in SSA form. bool lvaInSsa(unsigned lclNum) { assert(lclNum < lvaCount); return lvaTable[lclNum].lvInSsa; } unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX #if defined(FEATURE_EH_FUNCLETS) unsigned lvaPSPSym; // variable representing the PSPSym #endif InlineInfo* impInlineInfo; // Only present for inlinees InlineStrategy* m_inlineStrategy; InlineContext* compInlineContext; // Always present // The Compiler* that is the root of the inlining tree of which "this" is a member. Compiler* impInlineRoot(); #if defined(DEBUG) || defined(INLINE_DATA) unsigned __int64 getInlineCycleCount() { return m_compCycles; } #endif // defined(DEBUG) || defined(INLINE_DATA) bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method. bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters this method. 
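    // Illustrative sketch (not part of the JIT): a minimal, self-contained restatement of the
    // TYP_SIMD12 frame-size rule documented in lvaMapSimd12ToSimd16 above. A SIMD12 local is
    // normally given 16 bytes of frame space, but a dependently promoted field must keep its
    // exact 12-byte size unless it is the sole field of a 16-byte parent struct. All names here
    // (Simd12SizeModel, isDependentlyPromotedField, parentFieldCount, parentSize) are hypothetical
    // and chosen only for this illustration.
    struct Simd12SizeModel
    {
        bool     isDependentlyPromotedField; // is the local a field of a dependently promoted struct?
        unsigned parentFieldCount;           // number of fields in the parent struct, if any
        unsigned parentSize;                 // size of the parent struct in bytes, if any

        unsigned FrameSize() const
        {
            if (!isDependentlyPromotedField)
            {
                // Widened from 12 to 16 bytes so loads/stores can use full-vector operations.
                return 16;
            }
            // A dependent field keeps its exact layout within its parent, unless it is the
            // only field and the parent itself is 16 bytes.
            return ((parentFieldCount == 1) && (parentSize == 16)) ? 16 : 12;
        }
    };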
//========================================================================= // PROTECTED //========================================================================= protected: //---------------- Local variable ref-counting ---------------------------- void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute); bool IsDominatedByExceptionalEntry(BasicBlock* block); void SetVolatileHint(LclVarDsc* varDsc); // Keeps the mapping from SSA #'s to VN's for the implicit memory variables. SsaDefArray<SsaMemDef> lvMemoryPerSsaData; public: // Returns the address of the per-Ssa data for memory at the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum) { return lvMemoryPerSsaData.GetSsaDef(ssaNum); } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ private: // For prefixFlags enum { PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix PREFIX_TAILCALL_IMPLICIT = 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix PREFIX_TAILCALL_STRESS = 0x00000100, // call doesn't "tail" IL prefix but is treated as explicit because of tail call stress PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS), PREFIX_VOLATILE = 0x00001000, PREFIX_UNALIGNED = 0x00010000, PREFIX_CONSTRAINED = 0x00100000, PREFIX_READONLY = 0x01000000 }; static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix); static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp); static bool impOpcodeIsCallOpcode(OPCODE opcode); public: void impInit(); void impImport(); CORINFO_CLASS_HANDLE impGetRefAnyClass(); CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle(); CORINFO_CLASS_HANDLE impGetTypeHandleClass(); CORINFO_CLASS_HANDLE impGetStringClass(); CORINFO_CLASS_HANDLE impGetObjectClass(); // Returns underlying type of handles returned by ldtoken instruction var_types GetRuntimeHandleUnderlyingType() { // RuntimeTypeHandle is backed by raw pointer on CoreRT and by object reference on other runtimes return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF; } void impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* contextHandle, CORINFO_CONTEXT_HANDLE* exactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset = BAD_IL_OFFSET); //========================================================================= // PROTECTED //========================================================================= protected: //-------------------- Stack manipulation --------------------------------- unsigned impStkSize; // Size of the full stack #define SMALL_STACK_SIZE 16 // number of elements in impSmallStack struct SavedStack // used to save/restore stack contents. 
{ unsigned ssDepth; // number of values on stack StackEntry* ssTrees; // saved tree values }; bool impIsPrimitive(CorInfoType type); bool impILConsumesAddr(const BYTE* codeAddr); void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind); void impPushOnStack(GenTree* tree, typeInfo ti); void impPushNullObjRefOnStack(); StackEntry impPopStack(); StackEntry& impStackTop(unsigned n = 0); unsigned impStackHeight(); void impSaveStackState(SavedStack* savePtr, bool copy); void impRestoreStackState(SavedStack* savePtr); GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation = false); void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken); void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); bool impCanPInvokeInline(); bool impCanPInvokeInlineCallSite(BasicBlock* block); void impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block); GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo()); void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig); void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); var_types impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a // type parameter? 
GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset); CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle); bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv); GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd); GenTree* impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv); #ifdef DEBUG var_types impImportJitTestLabelMark(int numArgs); #endif // DEBUG GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken); GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp); GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp); static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr); GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp); GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp); void impImportLeave(BasicBlock* block); void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr); GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom); GenTree* impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impExpandHalfConstEquals(GenTreeLclVar* data, GenTree* lengthFld, bool checkForNull, bool startsWith, WCHAR* cnsData, int len, int dataOffset); GenTree* impExpandHalfConstEqualsSWAR(GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset); GenTree* impExpandHalfConstEqualsSIMD(GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset); GenTreeStrCon* impGetStrConFromSpan(GenTree* span); GenTree* impIntrinsic(GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef, bool readonlyCall, bool tailCall, CORINFO_RESOLVED_TOKEN* pContstrainedResolvedToken, CORINFO_THIS_TRANSFORM constraintCallThisTransform, NamedIntrinsic* pIntrinsicName, bool* isSpecialIntrinsic = nullptr); GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall); NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method); GenTree* impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); #ifdef FEATURE_HW_INTRINSICS GenTree* impHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, GenTree* newobjThis); protected: bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa); GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, var_types retType, CorInfoType simdBaseJitType, unsigned simdSize, GenTree* newobjThis); GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE argClass, bool expectAddr = false, GenTree* newobjThis = nullptr); GenTree* impNonConstFallback(NamedIntrinsic 
intrinsic, var_types simdType, CorInfoType simdBaseJitType); GenTree* addRangeCheckIfNeeded( NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound); GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound); #ifdef TARGET_XARCH GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); #endif // TARGET_XARCH #endif // FEATURE_HW_INTRINSICS GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName); GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive); GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform); //----------------- Manipulating the trees and stmts ---------------------- Statement* impStmtList; // Statements for the BB being imported. Statement* impLastStmt; // The last statement for the current BB. public: enum { CHECK_SPILL_ALL = -1, CHECK_SPILL_NONE = -2 }; void impBeginTreeList(); void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt); void impEndTreeList(BasicBlock* block); void impAppendStmtCheck(Statement* stmt, unsigned chkLevel); void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true); void impAppendStmt(Statement* stmt); void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore); Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true); void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore); void impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel = (unsigned)CHECK_SPILL_NONE, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); void impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); Statement* impExtractLastStmt(); GenTree* impCloneExpr(GenTree* tree, GenTree** clone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)); GenTree* impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impAssignStructPtr(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref); var_types 
impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr); GenTree* impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization = false); GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false, bool importParent = false); GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false) { return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true); } GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind); GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args = nullptr, CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr); bool impIsCastHelperEligibleForClassProbe(GenTree* tree); bool impIsCastHelperMayHaveProfileData(GenTree* tree); GenTree* impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset); GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass); bool VarTypeIsMultiByteAndCanEnreg(var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg, CorInfoCallConvExtension callConv); bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName); bool IsTargetIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(GenTree* tree); private: //----------------- Importing the method ---------------------------------- CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens. #ifdef DEBUG unsigned impCurOpcOffs; const char* impCurOpcName; bool impNestedStackSpill; // For displaying instrs with generated native code (-n:B) Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset(). void impNoteLastILoffs(); #endif // Debug info of current statement being imported. It gets set to contain // no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been // set in the appended trees. Then it gets updated at IL instructions for // which we have to report mapping info. // It will always contain the current inline context. 
DebugInfo impCurStmtDI; DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall); void impCurStmtOffsSet(IL_OFFSET offs); void impNoteBranchOffs(); unsigned impInitBlockLineInfo(); bool impIsThis(GenTree* obj); bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsAnySTLOC(OPCODE opcode) { return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) || ((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3))); } GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr); bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const; GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0); //---------------- Spilling the importer stack ---------------------------- // The maximum number of bytes of IL processed without clean stack state. // It allows to limit the maximum tree size and depth. static const unsigned MAX_TREE_SIZE = 200; bool impCanSpillNow(OPCODE prevOpcode); struct PendingDsc { PendingDsc* pdNext; BasicBlock* pdBB; SavedStack pdSavedStack; ThisInitState pdThisPtrInit; }; PendingDsc* impPendingList; // list of BBs currently waiting to be imported. PendingDsc* impPendingFree; // Freed up dscs that can be reused // We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation. JitExpandArray<BYTE> impPendingBlockMembers; // Return the byte for "b" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. BYTE impGetPendingBlockMember(BasicBlock* blk) { return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd()); } // Set the byte for "b" to "val" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. void impSetPendingBlockMember(BasicBlock* blk, BYTE val) { impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val); } bool impCanReimport; bool impSpillStackEntry(unsigned level, unsigned varNum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ); void impSpillStackEnsure(bool spillLeaves = false); void impEvalSideEffects(); void impSpillSpecialSideEff(); void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)); void impSpillValueClasses(); void impSpillEvalStack(); static fgWalkPreFn impFindValueClasses; void impSpillLclRefs(ssize_t lclNum); BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter); bool impBlockIsInALoop(BasicBlock* block); void impImportBlockCode(BasicBlock* block); void impReimportMarkBlock(BasicBlock* block); void impReimportMarkSuccessors(BasicBlock* block); void impVerifyEHBlock(BasicBlock* block, bool isTryStart); void impImportBlockPending(BasicBlock* block); // Similar to impImportBlockPending, but assumes that block has already been imported once and is being // reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState // for the block, but instead, just re-uses the block's existing EntryState. void impReimportBlockPending(BasicBlock* block); var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2); void impImportBlock(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. 
We will assign the values // on the stack to local variables (the "spill temp" variables). The successor blocks will assume that // its incoming stack contents are in those locals. This requires "block" and its successors to agree on // the variables that will be used -- and for all the predecessors of those successors, and the // successors of those predecessors, etc. Call such a set of blocks closed under alternating // successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the // clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill // temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series // of local variable numbers, so we represent them with the base local variable number), returns that. // Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of // which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps // chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending // on which kind of member of the clique the block is). unsigned impGetSpillTmpBase(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We have previously // assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks // will assume that its incoming stack contents are in those locals. This requires "block" and its // successors to agree on the variables and their types that will be used. The CLI spec allows implicit // conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can // push an int and another can push a native int. For 64-bit we have chosen to implement this by typing // the "spill temp" as native int, and then importing (or re-importing as needed) so that all the // predecessors in the "spill clique" push a native int (sign-extending if needed), and all the // successors receive a native int. Similarly float and double are unified to double. // This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark // blocks for re-importation as appropriate (both successors, so they get the right incoming type, and // predecessors, so they insert an upcast if needed). void impReimportSpillClique(BasicBlock* block); // When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic // block, and represent the predecessor and successor members of the clique currently being computed. // *** Access to these will need to be locked in a parallel compiler. 
    JitExpandArray<BYTE> impSpillCliquePredMembers;
    JitExpandArray<BYTE> impSpillCliqueSuccMembers;

    enum SpillCliqueDir
    {
        SpillCliquePred,
        SpillCliqueSucc
    };

    // Abstract class for receiving a callback while walking a spill clique
    class SpillCliqueWalker
    {
    public:
        virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0;
    };

    // This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique
    class SetSpillTempsBase : public SpillCliqueWalker
    {
        unsigned m_baseTmp;

    public:
        SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp)
        {
        }
        virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
    };

    // This class is used for implementing impReimportSpillClique part on each block within the spill clique
    class ReimportSpillClique : public SpillCliqueWalker
    {
        Compiler* m_pComp;

    public:
        ReimportSpillClique(Compiler* pComp) : m_pComp(pComp)
        {
        }
        virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
    };

    // This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each
    // predecessor or successor within the spill clique
    void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback);

    // For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the
    // incoming locals. This walks that list and resets the types of the GenTrees to match the types of
    // the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique).
    void impRetypeEntryStateTemps(BasicBlock* blk);

    BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk);
    void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val);

    void impPushVar(GenTree* op, typeInfo tiRetVal);
    GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset));
    void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal);
    void impLoadVar(unsigned lclNum, IL_OFFSET offset)
    {
        impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo);
    }
    void impLoadArg(unsigned ilArgNum, IL_OFFSET offset);
    void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset);
    bool impReturnInstruction(int prefixFlags, OPCODE& opcode);

#ifdef TARGET_ARM
    void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass);
#endif

    // A free list of linked list nodes used to represent to-do stacks of basic blocks.
    struct BlockListNode
    {
        BasicBlock*    m_blk;
        BlockListNode* m_next;
        BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next)
        {
        }
        void* operator new(size_t sz, Compiler* comp);
    };

    BlockListNode* impBlockListNodeFreeList;

    void FreeBlockListNode(BlockListNode* node);

    bool impIsValueType(typeInfo* pTypeInfo);
    var_types mangleVarArgsType(var_types type);

    regNumber getCallArgIntRegister(regNumber floatReg);
    regNumber getCallArgFloatRegister(regNumber intReg);

#if defined(DEBUG)
    static unsigned jitTotalMethodCompiled;
#endif

#ifdef DEBUG
    static LONG jitNestingLevel;
#endif // DEBUG

    static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr);

    void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult);

    // STATIC inlining decision based on the IL code.
void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult); void impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult); void impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult); void impInlineInitVars(InlineInfo* pInlineInfo); unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)); GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo); bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo); bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo); void impMarkInlineCandidate(GenTree* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); void impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); bool impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv); bool impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive); bool impIsImplicitTailCallCandidate( OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive); bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd); bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array); CORINFO_RESOLVED_TOKEN* impAllocateToken(const CORINFO_RESOLVED_TOKEN& token); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX FlowGraph XX XX XX XX Info about the basic-blocks, their contents and the flow analysis XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: BasicBlock* fgFirstBB; // Beginning of the basic block list BasicBlock* fgLastBB; // End of the basic block list BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section BasicBlock* fgEntryBB; // For OSR, the original method's entry point BasicBlock* fgOSREntryBB; // For OSR, the logical entry point (~ patchpoint) #if defined(FEATURE_EH_FUNCLETS) BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets) #endif BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been // created. 
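// fgFirstBB heads a singly linked list of blocks threaded through bbNext; the Blocks() range
// helpers declared further down are thin wrappers over that list. The sketch below (under
// #if 0, with a hypothetical Block type) shows the shape of such a wrapper and how the
// "for (BasicBlock* const block : compiler->Blocks())" idiom falls out of it.
#if 0
#include <cstdio>

struct Block
{
    int    bbNum;
    Block* bbNext;
};

// Minimal forward-iteration wrapper in the spirit of BasicBlockSimpleList.
class BlockList
{
    Block* m_first;

public:
    explicit BlockList(Block* first) : m_first(first) {}

    struct iterator
    {
        Block*    m_blk;
        Block*    operator*() const { return m_blk; }
        iterator& operator++() { m_blk = m_blk->bbNext; return *this; }
        bool      operator!=(const iterator& other) const { return m_blk != other.m_blk; }
    };

    iterator begin() const { return {m_first}; }
    iterator end() const { return {nullptr}; }
};

int main()
{
    Block b3{3, nullptr}, b2{2, &b3}, b1{1, &b2};
    for (Block* const block : BlockList(&b1))
    {
        std::printf("BB%02d\n", block->bbNum);
    }
    return 0;
}
#endif // illustrative sketch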
BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks unsigned fgEdgeCount; // # of control flow edges between the BBs unsigned fgBBcount; // # of BBs in the method #ifdef DEBUG unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen #endif unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute // dominance. Indexed by block number. Size: fgBBNumMax + 1. // After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute // dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and // postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered // starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely // to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array // index). The arrays are of size fgBBNumMax + 1. unsigned* fgDomTreePreOrder; unsigned* fgDomTreePostOrder; // Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree // in order to avoid the need for SSA reconstruction and an "out of SSA" phase). DomTreeNode* fgSsaDomTree; bool fgBBVarSetsInited; // Allocate array like T* a = new T[fgBBNumMax + 1]; // Using helper so we don't keep forgetting +1. template <typename T> T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown) { return getAllocator(cmk).allocate<T>(fgBBNumMax + 1); } // BlockSets are relative to a specific set of BasicBlock numbers. If that changes // (if the blocks are renumbered), this changes. BlockSets from different epochs // cannot be meaningfully combined. Note that new blocks can be created with higher // block numbers without changing the basic block epoch. These blocks *cannot* // participate in a block set until the blocks are all renumbered, causing the epoch // to change. This is useful if continuing to use previous block sets is valuable. // If the epoch is zero, then it is uninitialized, and block sets can't be used. unsigned fgCurBBEpoch; unsigned GetCurBasicBlockEpoch() { return fgCurBBEpoch; } // The number of basic blocks in the current epoch. When the blocks are renumbered, // this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains // the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered. unsigned fgCurBBEpochSize; // The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize // bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called. unsigned fgBBSetCountInSizeTUnits; void NewBasicBlockEpoch() { INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits); // We have a new epoch. Compute and cache the size needed for new BlockSets. fgCurBBEpoch++; fgCurBBEpochSize = fgBBNumMax + 1; fgBBSetCountInSizeTUnits = roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8); #ifdef DEBUG // All BlockSet objects are now invalid! fgReachabilitySetsValid = false; // the bbReach sets are now invalid! fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid! 
if (verbose) { unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t)); printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)", fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long"); if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1))) { // If we're not just establishing the first epoch, and the epoch array size has changed such that we're // going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an // array of size_t bitsets), then print that out. printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long"); } printf("\n"); } #endif // DEBUG } void EnsureBasicBlockEpoch() { if (fgCurBBEpochSize != fgBBNumMax + 1) { NewBasicBlockEpoch(); } } BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind); void fgEnsureFirstBBisScratch(); bool fgFirstBBisScratch(); bool fgBBisScratch(BasicBlock* block); void fgExtendEHRegionBefore(BasicBlock* block); void fgExtendEHRegionAfter(BasicBlock* block); BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, bool putInFilter = false, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, BasicBlock* srcBlk, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind); BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind, BasicBlock* afterBlk, unsigned xcptnIndex, bool putInTryRegion); void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk); void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk); void fgUnlinkBlock(BasicBlock* block); #ifdef FEATURE_JIT_METHOD_PERF unsigned fgMeasureIR(); #endif // FEATURE_JIT_METHOD_PERF bool fgModified; // True if the flow graph has been modified recently bool fgComputePredsDone; // Have we computed the bbPreds list bool fgCheapPredsValid; // Is the bbCheapPreds list valid? bool fgDomsComputed; // Have we computed the dominator sets? bool fgReturnBlocksComputed; // Have we computed the return blocks list? bool fgOptimizedFinally; // Did we optimize any try-finallys? bool fgHasSwitch; // any BBJ_SWITCH jumps? BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler // begin blocks. #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BlockSet fgAlwaysBlks; // Set of blocks which are BBJ_ALWAYS part of BBJ_CALLFINALLY/BBJ_ALWAYS pair that should // never be removed due to a requirement to use the BBJ_ALWAYS for generating code and // not have "retless" blocks. #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG bool fgReachabilitySetsValid; // Are the bbReach sets valid? bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid? #endif // DEBUG bool fgRemoveRestOfBlock; // true if we know that we will throw bool fgStmtRemoved; // true if we remove statements -> need new DFA // There are two modes for ordering of the trees. // - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in // each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order) // by traversing the tree according to the order of the operands. 
// - In FGOrderLinear, the dominant ordering is the linear order. enum FlowGraphOrder { FGOrderTree, FGOrderLinear }; FlowGraphOrder fgOrder; // The following are boolean flags that keep track of the state of internal data structures bool fgStmtListThreaded; // true if the node list is now threaded bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights bool fgSlopUsedInEdgeWeights; // true if their was some slop used when computing the edge weights bool fgRangeUsedInEdgeWeights; // true if some of the edgeWeight are expressed in Min..Max form bool fgNeedsUpdateFlowGraph; // true if we need to run fgUpdateFlowGraph weight_t fgCalledCount; // count of the number of times this method was called // This is derived from the profile data // or is BB_UNITY_WEIGHT when we don't have profile data #if defined(FEATURE_EH_FUNCLETS) bool fgFuncletsCreated; // true if the funclet creation phase has been run #endif // FEATURE_EH_FUNCLETS bool fgGlobalMorph; // indicates if we are during the global morphing phase // since fgMorphTree can be called from several places bool impBoxTempInUse; // the temp below is valid and available unsigned impBoxTemp; // a temporary that is used for boxing #ifdef DEBUG bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert, // and we are trying to compile again in a "safer", minopts mode? #endif #if defined(DEBUG) unsigned impInlinedCodeSize; bool fgPrintInlinedMethods; #endif jitstd::vector<flowList*>* fgPredListSortVector; //------------------------------------------------------------------------- void fgInit(); PhaseStatus fgImport(); PhaseStatus fgTransformIndirectCalls(); PhaseStatus fgTransformPatchpoints(); PhaseStatus fgInline(); PhaseStatus fgRemoveEmptyTry(); PhaseStatus fgRemoveEmptyFinally(); PhaseStatus fgMergeFinallyChains(); PhaseStatus fgCloneFinally(); void fgCleanupContinuation(BasicBlock* continuation); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgUpdateFinallyTargetFlags(); void fgClearAllFinallyTargetBits(); void fgAddFinallyTargetFlags(); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgTailMergeThrows(); void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals // when this is necessary. 
bool fgNeedToAddFinallyTargetBits; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, BasicBlock* handler, BlockToBlockMap& continuationMap); GenTree* fgGetCritSectOfStaticMethod(); #if defined(FEATURE_EH_FUNCLETS) void fgAddSyncMethodEnterExit(); GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter); void fgConvertSyncReturnToLeave(BasicBlock* block); #endif // FEATURE_EH_FUNCLETS void fgAddReversePInvokeEnterExit(); bool fgMoreThanOneReturnBlock(); // The number of separate return points in the method. unsigned fgReturnCount; void fgAddInternal(); enum class FoldResult { FOLD_DID_NOTHING, FOLD_CHANGED_CONTROL_FLOW, FOLD_REMOVED_LAST_STMT, FOLD_ALTERED_LAST_STMT, }; FoldResult fgFoldConditional(BasicBlock* block); void fgMorphStmts(BasicBlock* block); void fgMorphBlocks(); void fgMergeBlockReturn(BasicBlock* block); bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)); void fgSetOptions(); #ifdef DEBUG static fgWalkPreFn fgAssertNoQmark; void fgPreExpandQmarkChecks(GenTree* expr); void fgPostExpandQmarkChecks(); static void fgCheckQmarkAllowedForm(GenTree* tree); #endif IL_OFFSET fgFindBlockILOffset(BasicBlock* block); void fgFixEntryFlowForOSR(); BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr); BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr); BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt); BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di); Statement* fgNewStmtFromTree(GenTree* tree); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block); Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di); GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr); void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt); void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt); void fgExpandQmarkNodes(); // Do "simple lowering." This functionality is (conceptually) part of "general" // lowering that is distributed between fgMorph and the lowering phase of LSRA. 
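// fgSplitEdge above is the classic critical-edge split: a fresh empty block is inserted on
// the curr->succ edge so that code can be placed on exactly that path without affecting
// curr's other successors or succ's other predecessors. A standalone sketch on a toy graph
// (hypothetical Block type with simple pred/succ vectors) is kept under #if 0 below.
#if 0
#include <algorithm>
#include <cassert>
#include <vector>

struct Block
{
    std::vector<Block*> succs;
    std::vector<Block*> preds;
};

// Insert a new block on the edge curr->succ and return it: afterwards curr branches to the
// new block, and the new block's only successor is the original succ.
Block* SplitEdge(Block* curr, Block* succ, std::vector<Block*>& allBlocks)
{
    Block* newBlk = new Block();
    allBlocks.push_back(newBlk);

    // Retarget curr's outgoing edge.
    auto sit = std::find(curr->succs.begin(), curr->succs.end(), succ);
    assert(sit != curr->succs.end());
    *sit = newBlk;
    newBlk->preds.push_back(curr);

    // Hook the new block up to the original successor.
    newBlk->succs.push_back(succ);
    auto pit = std::find(succ->preds.begin(), succ->preds.end(), curr);
    assert(pit != succ->preds.end());
    *pit = newBlk;

    return newBlk;
}
#endif // illustrative sketch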
void fgSimpleLowering(); GenTree* fgInitThisClass(); GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper); GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls); bool backendRequiresLocalVarLifetimes() { return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars(); } void fgLocalVarLiveness(); void fgLocalVarLivenessInit(); void fgPerNodeLocalVarLiveness(GenTree* node); void fgPerBlockLocalVarLiveness(); VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block); void fgLiveVarAnalysis(bool updateInternalOnly = false); void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call); void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeTrackedLocalDef(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeUntrackedLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* lclVarNode); bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode); void fgComputeLife(VARSET_TP& life, GenTree* startNode, GenTree* endNode, VARSET_VALARG_TP volatileVars, bool* pStmtInfoDirty DEBUGARG(bool* treeModf)); void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars); bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange); void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block); bool fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_VALARG_TP life, bool* doAgain, bool* pStmtInfoDirty, bool* pStoreRemoved DEBUGARG(bool* treeModf)); void fgInterBlockLocalVarLiveness(); // Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.: // 1. for (BasicBlock* const block : compiler->Blocks()) ... // 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ... // 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ... // In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3), // both `startBlock` and `endBlock` must be non-null. // BasicBlockSimpleList Blocks() const { return BasicBlockSimpleList(fgFirstBB); } BasicBlockSimpleList Blocks(BasicBlock* startBlock) const { return BasicBlockSimpleList(startBlock); } BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const { return BasicBlockRangeList(startBlock, endBlock); } // The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name // of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose // whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us // to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree. typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap; NodeToUnsignedMap* m_opAsgnVarDefSsaNums; NodeToUnsignedMap* GetOpAsgnVarDefSsaNums() { if (m_opAsgnVarDefSsaNums == nullptr) { m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator()); } return m_opAsgnVarDefSsaNums; } // This map tracks nodes whose value numbers explicitly or implicitly depend on memory states. // The map provides the entry block of the most closely enclosing loop that // defines the memory region accessed when defining the nodes's VN. 
// // This information should be consulted when considering hoisting node out of a loop, as the VN // for the node will only be valid within the indicated loop. // // It is not fine-grained enough to track memory dependence within loops, so cannot be used // for more general code motion. // // If a node does not have an entry in the map we currently assume the VN is not memory dependent // and so memory does not constrain hoisting. // typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap() { if (m_nodeToLoopMemoryBlockMap == nullptr) { m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator()); } return m_nodeToLoopMemoryBlockMap; } void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN); void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree); // Requires value numbering phase to have completed. Returns the value number ("gtVN") of the // "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the // "use" VN. Performs a lookup into the map of (use asg tree -> def VN.) to return the "def's" // VN. inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree); // Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl". // Except: assumes that lcl is a def, and if it is // a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def", // rather than the "use" SSA number recorded in the tree "lcl". inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl); inline bool PreciseRefCountsRequired(); // Performs SSA conversion. void fgSsaBuild(); // Reset any data structures to the state expected by "fgSsaBuild", so it can be run again. void fgResetForSsa(); unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run. // Returns "true" if this is a special variable that is never zero initialized in the prolog. inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum); // Returns "true" if the variable needs explicit zero initialization. inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn); // The value numbers for this compilation. ValueNumStore* vnStore; public: ValueNumStore* GetValueNumStore() { return vnStore; } // Do value numbering (assign a value number to each // tree node). void fgValueNumber(); // Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN. // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // The 'indType' is the indirection type of the lhs of the assignment and will typically // match the element type of the array or fldSeq. When this type doesn't match // or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN] // ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, FieldSeqNode* fldSeq, ValueNum rhsVN, var_types indType); // Requires that "tree" is a GT_IND marked as an array index, and that its address argument // has been parsed to yield the other input arguments. If evaluation of the address // can raise exceptions, those should be captured in the exception set "addrXvnp". // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique // VN for the conservative VN.) 
Also marks the tree's argument as the address of an array element. // The type tree->TypeGet() will typically match the element type of the array or fldSeq. // When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN // ValueNum fgValueNumberArrIndexVal(GenTree* tree, CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, ValueNumPair addrXvnp, FieldSeqNode* fldSeq); // Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown // by evaluating the array index expression "tree". Returns the value number resulting from // dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the // "GT_IND" that does the dereference, and it is given the returned value number. ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp); // Compute the value number for a byref-exposed load of the given type via the given pointerVN. ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN); unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run. // Utility functions for fgValueNumber. // Perform value-numbering for the trees in "blk". void fgValueNumberBlock(BasicBlock* blk); // Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the // innermost loop of which "entryBlock" is the entry. Returns the value number that should be // assumed for the memoryKind at the start "entryBlk". ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum); // Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated. // As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation. void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg)); // Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be // mutated. void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg)); // For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap. // As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store. void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg)); // For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's MemorySsaMap. void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg)); void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN); // Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that // value in that SSA #. void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree); // The input 'tree' is a leaf node that is a constant // Assign the proper value number to the tree void fgValueNumberTreeConst(GenTree* tree); // If the VN store has been initialized, reassign the // proper value number to the constant tree. void fgUpdateConstTreeValueNumber(GenTree* tree); // Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree. // (With some exceptions: the VN of the lhs of an assignment is assigned as part of the // assignment.) void fgValueNumberTree(GenTree* tree); void fgValueNumberAssignment(GenTreeOp* tree); // Does value-numbering for a block assignment. 
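// The value-numbering routines above rely on hash-consing: two expressions that apply the
// same function to operands with equal value numbers receive the same VN, so redundancy
// questions reduce to integer comparisons. The sketch under #if 0 shows that core idea with
// a deliberately tiny store; it is not the real ValueNumStore interface.
#if 0
#include <cstdint>
#include <map>
#include <tuple>

using ValueNum = uint32_t;

class TinyVNStore
{
    ValueNum                                                m_next = 1;
    std::map<int64_t, ValueNum>                             m_constMap; // constant -> VN
    std::map<std::tuple<int, ValueNum, ValueNum>, ValueNum> m_funcMap;  // (func, op1, op2) -> VN

public:
    ValueNum VNForConst(int64_t value)
    {
        auto it = m_constMap.find(value);
        return (it != m_constMap.end()) ? it->second : (m_constMap[value] = m_next++);
    }

    // 'func' is an opcode id; a commutative op would normalize its operand order first so
    // that (ADD, a, b) and (ADD, b, a) map to the same number.
    ValueNum VNForFunc(int func, ValueNum op1, ValueNum op2)
    {
        auto key = std::make_tuple(func, op1, op2);
        auto it  = m_funcMap.find(key);
        return (it != m_funcMap.end()) ? it->second : (m_funcMap[key] = m_next++);
    }
};
#endif // illustrative sketch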
void fgValueNumberBlockAssignment(GenTree* tree); bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src); // Does value-numbering for a cast tree. void fgValueNumberCastTree(GenTree* tree); // Does value-numbering for an intrinsic tree. void fgValueNumberIntrinsic(GenTree* tree); #ifdef FEATURE_SIMD // Does value-numbering for a GT_SIMD tree void fgValueNumberSimd(GenTreeSIMD* tree); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS // Does value-numbering for a GT_HWINTRINSIC tree void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree); #endif // FEATURE_HW_INTRINSICS // Does value-numbering for a call. We interpret some helper calls. void fgValueNumberCall(GenTreeCall* call); // Does value-numbering for a helper representing a cast operation. void fgValueNumberCastHelper(GenTreeCall* call); // Does value-numbering for a helper "call" that has a VN function symbol "vnf". void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc); // Requires "helpCall" to be a helper call. Assigns it a value number; // we understand the semantics of some of the calls. Returns "true" if // the call may modify the heap (we assume arbitrary memory side effects if so). bool fgValueNumberHelperCall(GenTreeCall* helpCall); // Requires that "helpFunc" is one of the pure Jit Helper methods. // Returns the corresponding VNFunc to use for value numbering VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc); // Adds the exception set for the current tree node which has a memory indirection operation void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr); // Adds the exception sets for the current tree node which is performing a division or modulus operation void fgValueNumberAddExceptionSetForDivision(GenTree* tree); // Adds the exception set for the current tree node which is performing a overflow checking operation void fgValueNumberAddExceptionSetForOverflow(GenTree* tree); // Adds the exception set for the current tree node which is performing a bounds check operation void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree); // Adds the exception set for the current tree node which is performing a ckfinite operation void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree); // Adds the exception sets for the current tree node void fgValueNumberAddExceptionSet(GenTree* tree); #ifdef DEBUG void fgDebugCheckExceptionSets(); void fgDebugCheckValueNumberedTree(GenTree* tree); #endif // These are the current value number for the memory implicit variables while // doing value numbering. These are the value numbers under the "liberal" interpretation // of memory values; the "conservative" interpretation needs no VN, since every access of // memory yields an unknown value. ValueNum fgCurMemoryVN[MemoryKindCount]; // Return a "pseudo"-class handle for an array element type. If "elemType" is TYP_STRUCT, // requires "elemStructType" to be non-null (and to have a low-order zero). Otherwise, low order bit // is 1, and the rest is an encoding of "elemTyp". static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType) { if (elemStructType != nullptr) { assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF || varTypeIsIntegral(elemTyp)); assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid. 
return elemStructType; } else { assert(elemTyp != TYP_STRUCT); elemTyp = varTypeToSigned(elemTyp); return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1); } } // If "clsHnd" is the result of an "EncodePrim" call, returns true and sets "*pPrimType" to the // var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption that "clsHnd" is // the struct type of the element). static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd) { size_t clsHndVal = size_t(clsHnd); if (clsHndVal & 0x1) { return var_types(clsHndVal >> 1); } else { return TYP_STRUCT; } } // Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types var_types getJitGCType(BYTE gcType); // Returns true if the provided type should be treated as a primitive type // for the unmanaged calling conventions. bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd); enum structPassingKind { SPK_Unknown, // Invalid value, never returned SPK_PrimitiveType, // The struct is passed/returned using a primitive type. SPK_EnclosingType, // Like SPK_Primitive type, but used for return types that // require a primitive type temp that is larger than the struct size. // Currently used for structs of size 3, 5, 6, or 7 bytes. SPK_ByValue, // The struct is passed/returned by value (using the ABI rules) // for ARM64 and UNIX_X64 in multiple registers. (when all of the // parameters registers are used, then the stack will be used) // for X86 passed on the stack, for ARM32 passed in registers // or the stack or split between registers and the stack. SPK_ByValueAsHfa, // The struct is passed/returned as an HFA in multiple registers. SPK_ByReference }; // The struct is passed/returned by reference to a copy/buffer. // Get the "primitive" type that is used when we are given a struct of size 'structSize'. // For pointer sized structs the 'clsHnd' is used to determine if the struct contains GC ref. // A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double // If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned. // // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding // hfa types. // var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg); // Get the type that is used to pass values of the given struct type. // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding // hfa types. // var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, structPassingKind* wbPassStruct, bool isVarArg, unsigned structSize); // Get the type that is used to return values of the given struct type. // If the size is unknown, pass 0 and it will be determined from 'clsHnd'. var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, CorInfoCallConvExtension callConv, structPassingKind* wbPassStruct = nullptr, unsigned structSize = 0); #ifdef DEBUG // Print a representation of "vnp" or "vn" on standard output. // If "level" is non-zero, we also print out a partial expansion of the value. void vnpPrint(ValueNumPair vnp, unsigned level); void vnPrint(ValueNum vn, unsigned level); #endif bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2 // Dominator computation member functions // Not exposed outside Compiler protected: bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2 // Compute immediate dominators, the dominator tree and its pre/post-order traversal numbers. 
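// EncodeElemType/DecodeElemType above smuggle a primitive element type through a class handle
// by setting the low bit, which real (aligned) handles never have set. The same pointer-tagging
// trick is shown standalone under #if 0, with made-up types in place of CORINFO_CLASS_HANDLE
// and var_types.
#if 0
#include <cassert>
#include <cstdint>

struct ClassDesc { int dummy; };   // stand-in for a real, suitably aligned descriptor
using Handle = ClassDesc*;

enum PrimType : uintptr_t { PT_INT = 4, PT_FLOAT = 10 };

// Primitives are encoded as (type << 1) | 1; genuine descriptors keep a clear low bit.
Handle EncodeElem(PrimType t)
{
    return reinterpret_cast<Handle>((static_cast<uintptr_t>(t) << 1) | 0x1);
}

// Returns true and extracts the primitive type if 'h' is an encoded primitive.
bool DecodeElem(Handle h, PrimType* outPrim)
{
    uintptr_t bits = reinterpret_cast<uintptr_t>(h);
    if (bits & 0x1)
    {
        *outPrim = static_cast<PrimType>(bits >> 1);
        return true;
    }
    return false; // a genuine descriptor pointer
}

int main()
{
    PrimType p;
    assert(DecodeElem(EncodeElem(PT_FLOAT), &p) && (p == PT_FLOAT));

    ClassDesc real{};
    assert((reinterpret_cast<uintptr_t>(&real) & 0x1) == 0); // the alignment assumption
    assert(!DecodeElem(&real, &p));
    return 0;
}
#endif // illustrative sketch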
void fgComputeDoms(); void fgCompDominatedByExceptionalEntryBlocks(); BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block. // Note: this is relatively slow compared to calling fgDominate(), // especially if dealing with a single block versus block check. void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.) void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks. void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'. bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets. void fgComputeReachability(); // Perform flow graph node reachability analysis. BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets. void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be // processed in topological sort, this function takes care of that. void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count); BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph. // Returns this as a set. INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds. DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph // (performed by fgComputeDoms), this procedure builds the dominance tree represented // adjacency lists. // In order to speed up the queries of the form 'Does A dominates B', we can perform a DFS preorder and postorder // traversal of the dominance tree and the dominance query will become A dominates B iif preOrder(A) <= preOrder(B) // && postOrder(A) >= postOrder(B) making the computation O(1). void fgNumberDomTree(DomTreeNode* domTree); // When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets, // dominators, and possibly loops. void fgUpdateChangedFlowGraph(const bool computePreds = true, const bool computeDoms = true, const bool computeReturnBlocks = false, const bool computeLoops = false); public: // Compute the predecessors of the blocks in the control flow graph. void fgComputePreds(); // Remove all predecessor information. void fgRemovePreds(); // Compute the cheap flow graph predecessors lists. This is used in some early phases // before the full predecessors lists are computed. void fgComputeCheapPreds(); private: void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred); void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred); public: enum GCPollType { GCPOLL_NONE, GCPOLL_CALL, GCPOLL_INLINE }; // Initialize the per-block variable sets (used for liveness analysis). void fgInitBlockVarSets(); PhaseStatus fgInsertGCPolls(); BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block); // Requires that "block" is a block that returns from // a finally. Returns the number of successors (jump targets of // of blocks in the covered "try" that did a "LEAVE".) unsigned fgNSuccsOfFinallyRet(BasicBlock* block); // Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from // a finally. Returns its "i"th successor (jump targets of // of blocks in the covered "try" that did a "LEAVE".) // Requires that "i" < fgNSuccsOfFinallyRet(block). 
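// The preorder/postorder trick above makes dominance queries O(1): number the dominator tree
// with one DFS, and then A dominates B exactly when preOrder(A) <= preOrder(B) and
// postOrder(A) >= postOrder(B). The standalone sketch under #if 0 numbers a tiny hand-built
// dominator tree and checks a few queries.
#if 0
#include <cassert>
#include <vector>

struct DomNode
{
    std::vector<DomNode*> children; // children in the dominator tree
    unsigned              preNum  = 0;
    unsigned              postNum = 0;
};

static void NumberDomTree(DomNode* n, unsigned& preCounter, unsigned& postCounter)
{
    n->preNum = ++preCounter;
    for (DomNode* child : n->children)
    {
        NumberDomTree(child, preCounter, postCounter);
    }
    n->postNum = ++postCounter;
}

// A dominates B iff A is an ancestor of B in the dominator tree (every node dominates itself).
static bool Dominates(const DomNode* a, const DomNode* b)
{
    return (a->preNum <= b->preNum) && (a->postNum >= b->postNum);
}

int main()
{
    // root -> {x, y}, y -> {z}
    DomNode root, x, y, z;
    root.children = {&x, &y};
    y.children    = {&z};

    unsigned preCounter = 0, postCounter = 0;
    NumberDomTree(&root, preCounter, postCounter);

    assert(Dominates(&root, &z));
    assert(Dominates(&y, &z));
    assert(!Dominates(&x, &z));
    assert(Dominates(&z, &z));
    return 0;
}
#endif // illustrative sketch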
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i); private: // Factor out common portions of the impls of the methods above. void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres); public: // For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement, // skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.) // SwitchUniqueSuccSet contains the non-duplicated switch targets. // (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget, // which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already // been computed for the switch block. If a switch block is deleted or is transformed into a non-switch, // we leave the entry associated with the block, but it will no longer be accessed.) struct SwitchUniqueSuccSet { unsigned numDistinctSuccs; // Number of distinct targets of the switch. BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target // successors. // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation. void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); }; typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap; private: // Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow // iteration over only the distinct successors. BlockToSwitchDescMap* m_switchDescMap; public: BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true) { if ((m_switchDescMap == nullptr) && createIfNull) { m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator()); } return m_switchDescMap; } // Invalidate the map of unique switch block successors. For example, since the hash key of the map // depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that // we don't accidentally look up and return the wrong switch data. void InvalidateUniqueSwitchSuccMap() { m_switchDescMap = nullptr; } // Requires "switchBlock" to be a block that ends in a switch. Returns // the corresponding SwitchUniqueSuccSet. SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk); // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); // Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap. 
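// SwitchUniqueSuccSet exists because a switch's jump table routinely names the same block many
// times, while flow analyses only care about the distinct targets. The sketch under #if 0 shows
// the de-duplication itself (first-seen order preserved), with a hypothetical Block type
// standing in for BasicBlock.
#if 0
#include <unordered_set>
#include <vector>

struct Block { int bbNum; };

// Analogous to numDistinctSuccs/nonDuplicates: the distinct jump targets of one switch.
std::vector<Block*> DistinctSwitchTargets(const std::vector<Block*>& jumpTable)
{
    std::vector<Block*>        distinct;
    std::unordered_set<Block*> seen;

    for (Block* target : jumpTable)
    {
        if (seen.insert(target).second)
        {
            distinct.push_back(target);
        }
    }
    return distinct;
}
#endif // illustrative sketch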
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk); BasicBlock* fgFirstBlockOfHandler(BasicBlock* block); bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred); flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred); flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred); void fgRemoveBlockAsPred(BasicBlock* block); void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock); void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred); flowList* fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, flowList* oldEdge = nullptr, bool initializingPreds = false); // Only set to 'true' when we are computing preds in // fgComputePreds() void fgFindBasicBlocks(); bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt); bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion); BasicBlock* fgFindInsertPoint(unsigned regionIndex, bool putInTryRegion, BasicBlock* startBlk, BasicBlock* endBlk, BasicBlock* nearBlk, BasicBlock* jumpBlk, bool runRarely); unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr); void fgPostImportationCleanup(); void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false)); void fgUnlinkStmt(BasicBlock* block, Statement* stmt); bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt); void fgCreateLoopPreHeader(unsigned lnum); void fgUnreachableBlock(BasicBlock* block); void fgRemoveConditionalJump(BasicBlock* block); BasicBlock* fgLastBBInMainFunction(); BasicBlock* fgEndBBAfterMainFunction(); void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd); void fgRemoveBlock(BasicBlock* block, bool unreachable); bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext); BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst); bool fgRenumberBlocks(); bool fgExpandRarelyRunBlocks(); bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter); void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk); enum FG_RELOCATE_TYPE { FG_RELOCATE_TRY, // relocate the 'try' region FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary) }; BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType); #if defined(FEATURE_EH_FUNCLETS) #if defined(TARGET_ARM) void fgClearFinallyTargetBit(BasicBlock* block); #endif // defined(TARGET_ARM) bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block); bool fgAnyIntraHandlerPreds(BasicBlock* block); void fgInsertFuncletPrologBlock(BasicBlock* block); void fgCreateFuncletPrologBlocks(); void fgCreateFunclets(); #else // !FEATURE_EH_FUNCLETS bool fgRelocateEHRegions(); #endif // !FEATURE_EH_FUNCLETS bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target); bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum); bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum); bool fgOptimizeEmptyBlock(BasicBlock* block); bool 
fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest); bool fgOptimizeBranch(BasicBlock* bJump); bool fgOptimizeSwitchBranches(BasicBlock* block); bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev); bool fgOptimizeSwitchJumps(); #ifdef DEBUG void fgPrintEdgeWeights(); #endif void fgComputeBlockAndEdgeWeights(); weight_t fgComputeMissingBlockWeights(); void fgComputeCalledCount(weight_t returnWeight); void fgComputeEdgeWeights(); bool fgReorderBlocks(); PhaseStatus fgDetermineFirstColdBlock(); bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr); bool fgUpdateFlowGraph(bool doTailDup = false); void fgFindOperOrder(); // method that returns if you should split here typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data); void fgSetBlockOrder(); void fgRemoveReturnBlock(BasicBlock* block); /* Helper code that has been factored out */ inline void fgConvertBBToThrowBB(BasicBlock* block); bool fgCastNeeded(GenTree* tree, var_types toType); GenTree* fgDoNormalizeOnStore(GenTree* tree); GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry); // The following check for loops that don't execute calls bool fgLoopCallMarked; void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB); void fgLoopCallMark(); void fgMarkLoopHead(BasicBlock* block); unsigned fgGetCodeEstimate(BasicBlock* block); #if DUMP_FLOWGRAPHS enum class PhasePosition { PrePhase, PostPhase }; const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map); static void fgDumpTree(FILE* fgxFile, GenTree* const tree); FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type); bool fgDumpFlowGraph(Phases phase, PhasePosition pos); #endif // DUMP_FLOWGRAPHS #ifdef DEBUG void fgDispDoms(); void fgDispReach(); void fgDispBBLiveness(BasicBlock* block); void fgDispBBLiveness(); void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0); void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees); void fgDispBasicBlocks(bool dumpTrees = false); void fgDumpStmtTree(Statement* stmt, unsigned bbNum); void fgDumpBlock(BasicBlock* block); void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock); static fgWalkPreFn fgStress64RsltMulCB; void fgStress64RsltMul(); void fgDebugCheckUpdate(); void fgDebugCheckBBNumIncreasing(); void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true); void fgDebugCheckBlockLinks(); void fgDebugCheckLinks(bool morphTrees = false); void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees); void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt); void fgDebugCheckNodesUniqueness(); void fgDebugCheckLoopTable(); void fgDebugCheckFlags(GenTree* tree); void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags); void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags); void fgDebugCheckTryFinallyExits(); void fgDebugCheckProfileData(); bool fgDebugCheckIncomingProfileData(BasicBlock* block); bool fgDebugCheckOutgoingProfileData(BasicBlock* block); #endif // DEBUG static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2); static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2); static GenTree* fgGetFirstNode(GenTree* tree); //--------------------- Walking the trees in the IR ----------------------- struct fgWalkData { Compiler* compiler; fgWalkPreFn* wtprVisitorFn; fgWalkPostFn* wtpoVisitorFn; void* pCallbackData; // 
user-provided data GenTree* parent; // parent of current node, provided to callback GenTreeStack* parentStack; // stack of parent nodes, if asked for bool wtprLclsOnly; // whether to only visit lclvar nodes #ifdef DEBUG bool printModified; // callback can use this #endif }; fgWalkResult fgWalkTreePre(GenTree** pTree, fgWalkPreFn* visitor, void* pCallBackData = nullptr, bool lclVarsOnly = false, bool computeStack = false); fgWalkResult fgWalkTree(GenTree** pTree, fgWalkPreFn* preVisitor, fgWalkPostFn* postVisitor, void* pCallBackData = nullptr); void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData); //----- Postorder fgWalkResult fgWalkTreePost(GenTree** pTree, fgWalkPostFn* visitor, void* pCallBackData = nullptr, bool computeStack = false); // An fgWalkPreFn that looks for expressions that have inline throws in // minopts mode. Basically it looks for tress with gtOverflowEx() or // GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It // returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assumes flags // properly propagated to parent trees). It returns WALK_CONTINUE // otherwise. static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data); /************************************************************************** * PROTECTED *************************************************************************/ protected: friend class SsaBuilder; friend struct ValueNumberState; //--------------------- Detect the basic blocks --------------------------- BasicBlock** fgBBs; // Table of pointers to the BBs void fgInitBBLookup(); BasicBlock* fgLookupBB(unsigned addr); bool fgCanSwitchToOptimized(); void fgSwitchToOptimized(const char* reason); bool fgMayExplicitTailCall(); void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock); void fgLinkBasicBlocks(); unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgCheckBasicBlockControlFlow(); void fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, bool IsLeave = false /* is the src a leave block */); bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling); void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining); void fgAdjustForAddressExposedOrWrittenThis(); unsigned fgStressBBProf() { #ifdef DEBUG unsigned result = JitConfig.JitStressBBProf(); if (result == 0) { if (compStressCompile(STRESS_BB_PROFILE, 15)) { result = 1; } } return result; #else return 0; #endif } bool fgHaveProfileData(); bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight); Instrumentor* fgCountInstrumentor; Instrumentor* fgClassInstrumentor; PhaseStatus fgPrepareToInstrumentMethod(); PhaseStatus fgInstrumentMethod(); PhaseStatus fgIncorporateProfileData(); void fgIncorporateBlockCounts(); void fgIncorporateEdgeCounts(); CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema, UINT32 countSchemaItems, BYTE* pInstrumentationData, int32_t ilOffset, CLRRandom* random); public: const char* fgPgoFailReason; bool fgPgoDisabled; ICorJitInfo::PgoSource fgPgoSource; ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema; BYTE* fgPgoData; UINT32 fgPgoSchemaCount; HRESULT fgPgoQueryResult; UINT32 fgNumProfileRuns; UINT32 fgPgoBlockCounts; 
UINT32 fgPgoEdgeCounts; UINT32 fgPgoClassProfiles; unsigned fgPgoInlineePgo; unsigned fgPgoInlineeNoPgo; unsigned fgPgoInlineeNoPgoSingleBlock; void WalkSpanningTree(SpanningTreeVisitor* visitor); void fgSetProfileWeight(BasicBlock* block, weight_t weight); void fgApplyProfileScale(); bool fgHaveSufficientProfileData(); bool fgHaveTrustedProfileData(); // fgIsUsingProfileWeights - returns true if we have real profile data for this method // or if we have some fake profile data for the stress mode bool fgIsUsingProfileWeights() { return (fgHaveProfileData() || fgStressBBProf()); } // fgProfileRunsCount - returns total number of scenario runs for the profile data // or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data. unsigned fgProfileRunsCount() { return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED; } //-------- Insert a statement at the start or end of a basic block -------- #ifdef DEBUG public: static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true); #endif public: Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt); Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); private: void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt); void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt); void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt); public: void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt); private: Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList); // Create a new temporary variable to hold the result of *ppTree, // and transform the graph accordingly. GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr); GenTree* fgMakeMultiUse(GenTree** ppTree); private: // Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node. GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree); bool fgOperIsBitwiseRotationRoot(genTreeOps oper); #if !defined(TARGET_64BIT) // Recognize and morph a long multiplication with 32 bit operands. GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul); GenTreeOp* fgMorphLongMul(GenTreeOp* mul); #endif //-------- Determine the order in which the trees will be evaluated ------- unsigned fgTreeSeqNum; GenTree* fgTreeSeqLst; GenTree* fgTreeSeqBeg; GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false); void fgSetTreeSeqHelper(GenTree* tree, bool isLIR); void fgSetTreeSeqFinish(GenTree* tree, bool isLIR); void fgSetStmtSeq(Statement* stmt); void fgSetBlockOrder(BasicBlock* block); //------------------------- Morphing -------------------------------------- unsigned fgPtrArgCntMax; public: //------------------------------------------------------------------------ // fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This value is calculated during morph. // // Return Value: // Returns fgPtrArgCntMax, that is a private field. 
// unsigned fgGetPtrArgCntMax() const { return fgPtrArgCntMax; } //------------------------------------------------------------------------ // fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations. // void fgSetPtrArgCntMax(unsigned argCntMax) { fgPtrArgCntMax = argCntMax; } bool compCanEncodePtrArgCntMax(); private: hashBv* fgOutgoingArgTemps; hashBv* fgCurrentlyInUseArgTemps; void fgSetRngChkTarget(GenTree* tree, bool delay = true); BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay); #if REARRANGE_ADDS void fgMoveOpsLeft(GenTree* tree); #endif bool fgIsCommaThrow(GenTree* tree, bool forFolding = false); bool fgIsThrow(GenTree* tree); bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2); bool fgIsBlockCold(BasicBlock* block); GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper); GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true); GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs); // A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address, // it is useful to know whether the address will be immediately dereferenced, or whether the address value will // be used, perhaps by passing it as an argument to a called method. This affects how null checking is done: // for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we // know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that // all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently // small; hence the other fields of MorphAddrContext. enum MorphAddrContextKind { MACK_Ind, MACK_Addr, }; struct MorphAddrContext { MorphAddrContextKind m_kind; bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between // top-level indirection and here have been constants. size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true. // In that case, is the sum of those constant offsets. MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0) { } }; // A MACK_CopyBlock context is immutable, so we can just make one of these and share it. static MorphAddrContext s_CopyBlockMAC; #ifdef FEATURE_SIMD GenTree* getSIMDStructFromField(GenTree* tree, CorInfoType* simdBaseJitTypeOut, unsigned* indexOut, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic = false); GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree); GenTree* fgMorphFieldToSimdGetElement(GenTree* tree); bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt); void impMarkContiguousSIMDFieldAssignments(Statement* stmt); // fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking previous simd field assignment // in function: Complier::impMarkContiguousSIMDFieldAssignments. 
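// The MACK_Ind context above feeds one decision: when an address is dereferenced right away and
// every offset folded into it is a compile-time constant whose sum is small, the explicit null
// check can be dropped, because dereferencing null-plus-small-offset still faults in the
// protected page at address zero. A sketch of that predicate is under #if 0; the 4KB guard size
// is an assumed placeholder, not the JIT's actual threshold.
#if 0
#include <cstddef>

constexpr size_t kAssumedGuardRegionSize = 0x1000; // assumed; the real limit is target/VM specific

// True if an indirection at (base + totalOffset) may rely on the hardware fault for its null
// check: all folded offsets were constants and their sum stays inside the guard region.
bool CanOmitExplicitNullCheck(bool allConstantOffsets, size_t totalOffset)
{
    return allConstantOffsets && (totalOffset < kAssumedGuardRegionSize);
}
#endif // illustrative sketch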
Statement* fgPreviousCandidateSIMDFieldAsgStmt; #endif // FEATURE_SIMD GenTree* fgMorphArrayIndex(GenTree* tree); GenTree* fgMorphExpandCast(GenTreeCast* tree); GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl); void fgInitArgInfo(GenTreeCall* call); GenTreeCall* fgMorphArgs(GenTreeCall* call); void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass); GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph); public: bool fgAddrCouldBeNull(GenTree* addr); private: GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac); bool fgCanFastTailCall(GenTreeCall* call, const char** failReason); #if FEATURE_FASTTAILCALL bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee); #endif bool fgCheckStmtAfterTailCall(); GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help); bool fgCanTailCallViaJitHelper(); void fgMorphTailCallViaJitHelper(GenTreeCall* call); GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd); GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle); GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent); GenTree* fgMorphPotentialTailCall(GenTreeCall* call); GenTree* fgGetStubAddrArg(GenTreeCall* call); unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry); void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall); Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint); GenTree* fgMorphCall(GenTreeCall* call); GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call); void fgMorphCallInline(GenTreeCall* call, InlineResult* result); void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); #if DEBUG void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call); static fgWalkPreFn fgFindNonInlineCandidate; #endif GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call, CORINFO_CONTEXT_HANDLE* ExactContextHnd, CORINFO_RESOLVED_TOKEN* ldftnToken); GenTree* fgMorphLeaf(GenTree* tree); void fgAssignSetVarDef(GenTree* tree); GenTree* fgMorphOneAsgBlockOp(GenTree* tree); GenTree* fgMorphInitBlock(GenTree* tree); GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize); GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false); GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd); GenTree* fgMorphCopyBlock(GenTree* tree); GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree); GenTree* fgMorphForRegisterFP(GenTree* tree); GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr); GenTree* fgOptimizeCast(GenTreeCast* cast); GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp); GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp); #ifdef FEATURE_HW_INTRINSICS GenTree* 
fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node); #endif GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree); GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp); GenTree* fgOptimizeAddition(GenTreeOp* add); GenTree* fgOptimizeMultiply(GenTreeOp* mul); GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp); GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects); GenTree* fgMorphRetInd(GenTreeUnOp* tree); GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree); GenTree* fgMorphSmpOpOptional(GenTreeOp* tree); GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp); GenTree* fgMorphConst(GenTree* tree); bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2); GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true); GenTreeOp* fgMorphCommutative(GenTreeOp* tree); GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree); GenTree* fgMorphReduceAddOps(GenTree* tree); public: GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr); private: void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)); void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)); void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0)); Statement* fgMorphStmt; unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be // used when morphing big offset. //----------------------- Liveness analysis ------------------------------- VARSET_TP fgCurUseSet; // vars used by block (before an assignment) VARSET_TP fgCurDefSet; // vars assigned by block (before a use) MemoryKindSet fgCurMemoryUse; // True iff the current basic block uses memory. MemoryKindSet fgCurMemoryDef; // True iff the current basic block modifies memory. MemoryKindSet fgCurMemoryHavoc; // True if the current basic block is known to set memory to a "havoc" value. bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points. void fgMarkUseDef(GenTreeLclVarCommon* tree); void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope); void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope); void fgExtendDbgScopes(); void fgExtendDbgLifetimes(); #ifdef DEBUG void fgDispDebugScopes(); #endif // DEBUG //------------------------------------------------------------------------- // // The following keeps track of any code we've added for things like array // range checking or explicit calls to enable GC, and so on. // public: struct AddCodeDsc { AddCodeDsc* acdNext; BasicBlock* acdDstBlk; // block to which we jump unsigned acdData; SpecialCodeKind acdKind; // what kind of a special block is this? #if !FEATURE_FIXED_OUT_ARGS bool acdStkLvlInit; // has acdStkLvl value been already set? unsigned acdStkLvl; // stack level in stack slots. 
#endif // !FEATURE_FIXED_OUT_ARGS }; private: static unsigned acdHelper(SpecialCodeKind codeKind); AddCodeDsc* fgAddCodeList; bool fgAddCodeModf; bool fgRngChkThrowAdded; AddCodeDsc* fgExcptnTargetCache[SCK_COUNT]; BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind); BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind); public: AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData); bool fgUseThrowHelperBlocks(); AddCodeDsc* fgGetAdditionalCodeDescriptors() { return fgAddCodeList; } private: bool fgIsCodeAdded(); bool fgIsThrowHlpBlk(BasicBlock* block); #if !FEATURE_FIXED_OUT_ARGS unsigned fgThrowHlpBlkStkLevel(BasicBlock* block); #endif // !FEATURE_FIXED_OUT_ARGS unsigned fgBigOffsetMorphingTemps[TYP_COUNT]; unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo); void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); void fgInsertInlineeBlocks(InlineInfo* pInlineInfo); Statement* fgInlinePrependStatements(InlineInfo* inlineInfo); void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt); #if FEATURE_MULTIREG_RET GenTree* fgGetStructAsStructPtr(GenTree* tree); GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); #endif // FEATURE_MULTIREG_RET static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder; static fgWalkPostFn fgLateDevirtualization; #ifdef DEBUG static fgWalkPreFn fgDebugCheckInlineCandidates; void CheckNoTransformableIndirectCallsRemain(); static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls; #endif void fgPromoteStructs(); void fgMorphStructField(GenTree* tree, GenTree* parent); void fgMorphLocalField(GenTree* tree, GenTree* parent); // Reset the refCount for implicit byrefs. void fgResetImplicitByRefRefCount(); // Change implicit byrefs' types from struct to pointer, and for any that were // promoted, create new promoted struct temps. void fgRetypeImplicitByRefArgs(); // Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection). bool fgMorphImplicitByRefArgs(GenTree* tree); GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr); // Clear up annotations for any struct promotion temps created for implicit byrefs. void fgMarkDemotedImplicitByRefArgs(); void fgMarkAddressExposedLocals(); void fgMarkAddressExposedLocals(Statement* stmt); PhaseStatus fgForwardSub(); bool fgForwardSubBlock(BasicBlock* block); bool fgForwardSubStatement(Statement* statement); static fgWalkPreFn fgUpdateSideEffectsPre; static fgWalkPostFn fgUpdateSideEffectsPost; // The given local variable, required to be a struct variable, is being assigned via // a "lclField", to make it masquerade as an integral type in the ABI. Make sure that // the variable is not enregistered, and is therefore not promoted independently. 
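    // For example (illustrative only, assuming a 4-byte struct local): when such a local is written through
    // a reinterpreting TYP_INT GT_LCL_FLD so that it can be passed or returned in an integer register, the
    // whole variable has to live on the stack; this helper is where it gets marked as do-not-enregister so
    // that it is not promoted and enregistered independently.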
void fgLclFldAssign(unsigned lclNum); static fgWalkPreFn gtHasLocalsWithAddrOpCB; enum TypeProducerKind { TPK_Unknown = 0, // May not be a RuntimeType TPK_Handle = 1, // RuntimeType via handle TPK_GetType = 2, // RuntimeType via Object.get_Type() TPK_Null = 3, // Tree value is null TPK_Other = 4 // RuntimeType via other means }; TypeProducerKind gtGetTypeProducerKind(GenTree* tree); bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call); bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr); bool gtIsActiveCSE_Candidate(GenTree* tree); bool fgIsBigOffset(size_t offset); bool fgNeedReturnSpillTemp(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: void optInit(); GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt); GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt); void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt); protected: // Do hoisting for all loops. void optHoistLoopCode(); // To represent sets of VN's that have already been hoisted in outer loops. typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet; struct LoopHoistContext { private: // The set of variables hoisted in the current loop (or nullptr if there are none). VNSet* m_pHoistedInCurLoop; public: // Value numbers of expressions that have been hoisted in parent loops in the loop nest. VNSet m_hoistedInParentLoops; // Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest. // Previous decisions on loop-invariance of value numbers in the current loop. VNSet m_curLoopVnInvariantCache; VNSet* GetHoistedInCurLoop(Compiler* comp) { if (m_pHoistedInCurLoop == nullptr) { m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist()); } return m_pHoistedInCurLoop; } VNSet* ExtractHoistedInCurLoop() { VNSet* res = m_pHoistedInCurLoop; m_pHoistedInCurLoop = nullptr; return res; } LoopHoistContext(Compiler* comp) : m_pHoistedInCurLoop(nullptr) , m_hoistedInParentLoops(comp->getAllocatorLoopHoist()) , m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist()) { } }; // Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it. // Tracks the expressions that have been hoisted by containing loops by temporarily recording their // value numbers in "m_hoistedInParentLoops". This set is not modified by the call. void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt); // Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.) // Assumes that expressions have been hoisted in containing loops if their value numbers are in // "m_hoistedInParentLoops". // void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt); // Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable) // outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted // expressions to "hoistInLoop". 
void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext); // Return true if the tree looks profitable to hoist out of loop 'lnum'. bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum); // Performs the hoisting 'tree' into the PreHeader for loop 'lnum' void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt); // Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum". // Constants and init values are always loop invariant. // VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop. bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs); // If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop // in the loop table. bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum); // Records the set of "side effects" of all loops: fields (object instance and static) // written to, and SZ-array element type equivalence classes updated. void optComputeLoopSideEffects(); #ifdef DEBUG bool optAnyChildNotRemoved(unsigned loopNum); #endif // DEBUG // Mark a loop as removed. void optMarkLoopRemoved(unsigned loopNum); private: // Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop, // including all nested loops, and records the set of "side effects" of the loop: fields (object instance and // static) written to, and SZ-array element type equivalence classes updated. void optComputeLoopNestSideEffects(unsigned lnum); // Given a loop number 'lnum' mark it and any nested loops as having 'memoryHavoc' void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc); // Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part. // Returns false if we encounter a block that is not marked as being inside a loop. // bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk); // Hoist the expression "expr" out of loop "lnum". void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum); public: void optOptimizeBools(); public: PhaseStatus optInvertLoops(); // Invert loops so they're entered at top and tested at bottom. PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method PhaseStatus optSetBlockWeights(); PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table void optFindLoops(); PhaseStatus optCloneLoops(); void optCloneLoop(unsigned loopInd, LoopCloneContext* context); void optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight); PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info) void optRemoveRedundantZeroInits(); protected: // This enumeration describes what is killed by a call. enum callInterf { CALLINT_NONE, // no interference (most helpers) CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ) CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ) CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT) CALLINT_ALL, // kills everything (normal method call) }; enum class FieldKindForVN { SimpleStatic, WithBaseAddr }; public: // A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in // bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered" // in bbNext order; we use comparisons on the bbNum to decide order.) 
// The blocks that define the body are // top <= entry <= bottom // The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a // single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at // Compiler::optFindNaturalLoops(). struct LoopDsc { BasicBlock* lpHead; // HEAD of the loop (not part of the looping of the loop) -- has ENTRY as a successor. BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext // order) reachable in this loop. BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM) BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP) BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM) callInterf lpAsgCall; // "callInterf" for calls in the loop ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked) varRefKinds lpAsgInds : 8; // set of inds modified within the loop LoopFlags lpFlags; unsigned char lpExitCnt; // number of exits from the loop unsigned char lpParent; // The index of the most-nested loop that completely contains this one, // or else BasicBlock::NOT_IN_LOOP if no such loop exists. unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists. // (Actually, an "immediately" nested loop -- // no other child of this loop is a parent of lpChild.) unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent, // or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop // by following "lpChild" then "lpSibling" links. bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary // memory side effects. If this is set, the fields below // may not be accurate (since they become irrelevant.) VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop // The following counts are used for hoisting profitability checks. int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been // hoisted int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been // hoisted int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN> FieldHandleSet; FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified // in the loop. typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet; ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that // arrays of that type are modified // in the loop. 
// Adds the variable liveness information for 'blk' to 'this' LoopDsc void AddVariableLiveness(Compiler* comp, BasicBlock* blk); inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles // (shifted left, with a low-order bit set to distinguish.) // Use the {Encode/Decode}ElemType methods to construct/destruct these. inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd); /* The following values are set only for iterator loops, i.e. has the flag LPFLG_ITER set */ GenTree* lpIterTree; // The "i = i <op> const" tree unsigned lpIterVar() const; // iterator variable # int lpIterConst() const; // the constant with which the iterator is incremented genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.) void VERIFY_lpIterTree() const; var_types lpIterOperType() const; // For overflow instructions // Set to the block where we found the initialization for LPFLG_CONST_INIT or LPFLG_VAR_INIT loops. // Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block. BasicBlock* lpInitBlock; union { int lpConstInit; // initial constant value of iterator // : Valid if LPFLG_CONST_INIT unsigned lpVarInit; // initial local var number to which we initialize the iterator // : Valid if LPFLG_VAR_INIT }; // The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var") GenTree* lpTestTree; // pointer to the node containing the loop test genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE, // etc.) void VERIFY_lpTestTree() const; bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition GenTree* lpIterator() const; // the iterator node in the loop test GenTree* lpLimit() const; // the limit node in the loop test // Limit constant value of iterator - loop condition is "i RELOP const" // : Valid if LPFLG_CONST_LIMIT int lpConstLimit() const; // The lclVar # in the loop condition ( "i RELOP lclVar" ) // : Valid if LPFLG_VAR_LIMIT unsigned lpVarLimit() const; // The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" ) // : Valid if LPFLG_ARRLEN_LIMIT bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const; // Returns "true" iff this is a "top entry" loop. bool lpIsTopEntry() const { if (lpHead->bbNext == lpEntry) { assert(lpHead->bbFallsThrough()); assert(lpTop == lpEntry); return true; } else { return false; } } // Returns "true" iff "*this" contains the blk. bool lpContains(BasicBlock* blk) const { return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops // to be equal, but requiring bottoms to be different.) bool lpContains(BasicBlock* top, BasicBlock* bottom) const { return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring // bottoms to be different.) bool lpContains(const LoopDsc& lp2) const { return lpContains(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is (properly) contained by the range [top, bottom] // (allowing tops to be equal, but requiring bottoms to be different.) 
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const { return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum; } // Returns "true" iff "*this" is (properly) contained by "lp2" // (allowing tops to be equal, but requiring bottoms to be different.) bool lpContainedBy(const LoopDsc& lp2) const { return lpContainedBy(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is disjoint from the range [top, bottom]. bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const { return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum; } // Returns "true" iff "*this" is disjoint from "lp2". bool lpDisjoint(const LoopDsc& lp2) const { return lpDisjoint(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff the loop is well-formed (see code for defn). bool lpWellFormed() const { return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum && (lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum); } #ifdef DEBUG void lpValidatePreHeader() const { // If this is called, we expect there to be a pre-header. assert(lpFlags & LPFLG_HAS_PREHEAD); // The pre-header must unconditionally enter the loop. assert(lpHead->GetUniqueSucc() == lpEntry); // The loop block must be marked as a pre-header. assert(lpHead->bbFlags & BBF_LOOP_PREHEADER); // The loop entry must have a single non-loop predecessor, which is the pre-header. // We can't assume here that the bbNum are properly ordered, so we can't do a simple lpContained() // check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`. } #endif // DEBUG // LoopBlocks: convenience method for enabling range-based `for` iteration over all the // blocks in a loop, e.g.: // for (BasicBlock* const block : loop->LoopBlocks()) ... // Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order // from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered // to be part of the loop. // BasicBlockRangeList LoopBlocks() const { return BasicBlockRangeList(lpTop, lpBottom); } }; protected: bool fgMightHaveLoop(); // returns true if there are any back edges bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability public: LoopDsc* optLoopTable; // loop descriptor table unsigned char optLoopCount; // number of tracked loops unsigned char loopAlignCandidates; // number of loops identified for alignment // Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or // loop table pointers from the previous epoch are invalid. // TODO: validate this in some way? unsigned optCurLoopEpoch; void NewLoopEpoch() { ++optCurLoopEpoch; JITDUMP("New loop epoch %d\n", optCurLoopEpoch); } #ifdef DEBUG unsigned char loopsAligned; // number of loops actually aligned #endif // DEBUG bool optRecordLoop(BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt); void optClearLoopIterInfo(); #ifdef DEBUG void optPrintLoopInfo(unsigned lnum, bool printVerbose = false); void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false); void optPrintLoopTable(); #endif protected: unsigned optCallCount; // number of calls made in the method unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method unsigned optLoopsCloned; // number of loops cloned in the current method. 
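    // A typical way an optimization phase consumes the loop table (a sketch only; the LPFLG_REMOVED check
    // and the per-block work are illustrative):
    //
    //     for (unsigned lnum = 0; lnum < optLoopCount; lnum++)
    //     {
    //         LoopDsc& loop = optLoopTable[lnum];
    //         if (loop.lpFlags & LPFLG_REMOVED)
    //         {
    //             continue;
    //         }
    //         for (BasicBlock* const block : loop.LoopBlocks())
    //         {
    //             // ... per-block processing ...
    //         }
    //     }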
#ifdef DEBUG void optCheckPreds(); #endif void optResetLoopInfo(); void optFindAndScaleGeneralLoopBlocks(); // Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads. void optMarkLoopHeads(); void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false); bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt); unsigned optIsLoopIncrTree(GenTree* incr); bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar); bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar); bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar); bool optExtractInitTestIncr( BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr); void optFindNaturalLoops(); void optIdentifyLoopsForAlignment(); // Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' -- // each loop has a unique "top." Returns "true" iff the flowgraph has been modified. bool optCanonicalizeLoopNest(unsigned char loopInd); // Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top," // unshared with any other loop. Returns "true" iff the flowgraph has been modified bool optCanonicalizeLoop(unsigned char loopInd); // Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP". // Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP". // Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2". // A loop contains itself. bool optLoopContains(unsigned l1, unsigned l2) const; // Updates the loop table by changing loop "loopInd", whose head is required // to be "from", to be "to". Also performs this transformation for any // loop nested in "loopInd" that shares the same head as "loopInd". void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to); void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false); // Marks the containsCall information to "lnum" and any parent loops. void AddContainsCallAllContainingLoops(unsigned lnum); // Adds the variable liveness information from 'blk' to "lnum" and any parent loops. void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk); // Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops. void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // Adds "elemType" to the set of modified array element types of "lnum" and any parent loops. void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType); // Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone // of "from".) Copies the jump destination from "from" to "to". void optCopyBlkDest(BasicBlock* from, BasicBlock* to); // Returns true if 'block' is an entry block for any loop in 'optLoopTable' bool optIsLoopEntry(BasicBlock* block) const; // The depth of the loop described by "lnum" (an index into the loop table.) 
(0 == top level) unsigned optLoopDepth(unsigned lnum) { assert(lnum < optLoopCount); unsigned depth = 0; while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP) { ++depth; } return depth; } // Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score. struct OptInvertCountTreeInfoType { int sharedStaticHelperCount; int arrayLengthCount; }; static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data); bool optInvertWhileLoop(BasicBlock* block); private: static bool optIterSmallOverflow(int iterAtExit, var_types incrType); static bool optIterSmallUnderflow(int iterAtExit, var_types decrType); bool optComputeLoopRep(int constInit, int constLimit, int iterInc, genTreeOps iterOper, var_types iterType, genTreeOps testOper, bool unsignedTest, bool dupCond, unsigned* iterCount); static fgWalkPreFn optIsVarAssgCB; protected: bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var); bool optIsVarAssgLoop(unsigned lnum, unsigned var); int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE); bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit); protected: // The following is the upper limit on how many expressions we'll keep track // of for the CSE analysis. // static const unsigned MAX_CSE_CNT = EXPSET_SZ; static const int MIN_CSE_COST = 2; // BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask. // This BitVec uses one bit per CSE candidate BitVecTraits* cseMaskTraits; // one bit per CSE candidate // BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm. // Two bits are allocated per CSE candidate to compute CSE availability // plus an extra bit to handle the initial unvisited case. // (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.) // // The two bits per CSE candidate have the following meanings: // 11 - The CSE is available, and is also available when considering calls as killing availability. // 10 - The CSE is available, but is not available when considering calls as killing availability. // 00 - The CSE is not available // 01 - An illegal combination // BitVecTraits* cseLivenessTraits; //----------------------------------------------------------------------------------------------------------------- // getCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index. // Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate // CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from // GET_CSE_INDEX(). // static unsigned genCSEnum2bit(unsigned CSEnum) { assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT)); return CSEnum - 1; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE. // static unsigned getCSEAvailBit(unsigned CSEnum) { return genCSEnum2bit(CSEnum) * 2; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit // for a CSE considering calls as killing availability bit (see description above). 
// static unsigned getCSEAvailCrossCallBit(unsigned CSEnum) { return getCSEAvailBit(CSEnum) + 1; } void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true); EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites /* Generic list of nodes - used by the CSE logic */ struct treeLst { treeLst* tlNext; GenTree* tlTree; }; struct treeStmtLst { treeStmtLst* tslNext; GenTree* tslTree; // tree node Statement* tslStmt; // statement containing the tree BasicBlock* tslBlock; // block containing the statement }; // The following logic keeps track of expressions via a simple hash table. struct CSEdsc { CSEdsc* csdNextInBucket; // used by the hash table size_t csdHashKey; // the orginal hashkey ssize_t csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def ValueNum csdConstDefVN; // When we CSE similar constants, this is the ValueNumber that we use for the LclVar // assignment unsigned csdIndex; // 1..optCSECandidateCount bool csdIsSharedConst; // true if this CSE is a shared const bool csdLiveAcrossCall; unsigned short csdDefCount; // definition count unsigned short csdUseCount; // use count (excluding the implicit uses at defs) weight_t csdDefWtCnt; // weighted def count weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs) GenTree* csdTree; // treenode containing the 1st occurrence Statement* csdStmt; // stmt containing the 1st occurrence BasicBlock* csdBlock; // block containing the 1st occurrence treeStmtLst* csdTreeList; // list of matching tree nodes: head treeStmtLst* csdTreeLast; // list of matching tree nodes: tail // ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing // and GT_IND nodes always have valid struct handle. // CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE bool csdStructHndMismatch; ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE. // This will be set to NoVN if we decide to abandon this CSE ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses. ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value // number, this will reflect it; otherwise, NoVN. // not used for shared const CSE's }; static const size_t s_optCSEhashSizeInitial; static const size_t s_optCSEhashGrowthFactor; static const size_t s_optCSEhashBucketSize; size_t optCSEhashSize; // The current size of hashtable size_t optCSEhashCount; // Number of entries in hashtable size_t optCSEhashMaxCountBeforeResize; // Number of entries before resize CSEdsc** optCSEhash; CSEdsc** optCSEtab; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap; NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be // re-numbered with the bound to improve range check elimination // Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found. 
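    // For example (illustrative): in "if (i < a.Length) { ... a[i] ... }", if the "a.Length" node becomes a
    // CSE, the enclosing compare is recorded in optCseCheckedBoundMap so that it can be re-numbered against
    // the CSE's value number and the bounds check on "a[i]" remains recognizable to range check elimination.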
void optCseUpdateCheckedBoundMap(GenTree* compare); void optCSEstop(); CSEdsc* optCSEfindDsc(unsigned index); bool optUnmarkCSE(GenTree* tree); // user defined callback data for the tree walk function optCSE_MaskHelper() struct optCSE_MaskData { EXPSET_TP CSE_defMask; EXPSET_TP CSE_useMask; }; // Treewalk helper for optCSE_DefMask and optCSE_UseMask static fgWalkPreFn optCSE_MaskHelper; // This function walks all the node for an given tree // and return the mask of CSE definitions and uses for the tree // void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData); // Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2. bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode); struct optCSEcostCmpEx { bool operator()(const CSEdsc* op1, const CSEdsc* op2); }; struct optCSEcostCmpSz { bool operator()(const CSEdsc* op1, const CSEdsc* op2); }; void optCleanupCSEs(); #ifdef DEBUG void optEnsureClearCSEInfo(); #endif // DEBUG static bool Is_Shared_Const_CSE(size_t key) { return ((key & TARGET_SIGN_BIT) != 0); } // returns the encoded key static size_t Encode_Shared_Const_CSE_Value(size_t key) { return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS); } // returns the orginal key static size_t Decode_Shared_Const_CSE_Value(size_t enckey) { assert(Is_Shared_Const_CSE(enckey)); return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS; } /************************************************************************** * Value Number based CSEs *************************************************************************/ // String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX(). #define FMT_CSE "CSE #%02u" public: void optOptimizeValnumCSEs(); protected: void optValnumCSE_Init(); unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt); bool optValnumCSE_Locate(); void optValnumCSE_InitDataFlow(); void optValnumCSE_DataFlow(); void optValnumCSE_Availablity(); void optValnumCSE_Heuristic(); bool optDoCSE; // True when we have found a duplicate CSE tree bool optValnumCSE_phase; // True when we are executing the optOptimizeValnumCSEs() phase unsigned optCSECandidateCount; // Count of CSE's candidates unsigned optCSEstart; // The first local variable number that is a CSE unsigned optCSEcount; // The total count of CSE's introduced. weight_t optCSEweight; // The weight of the current block when we are doing PerformCSE bool optIsCSEcandidate(GenTree* tree); // lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler // bool lclNumIsTrueCSE(unsigned lclNum) const { return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount)); } // lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop. // bool lclNumIsCSE(unsigned lclNum) const { return lvaGetDesc(lclNum)->lvIsCSE; } #ifdef DEBUG bool optConfigDisableCSE(); bool optConfigDisableCSE2(); #endif void optOptimizeCSEs(); struct isVarAssgDsc { GenTree* ivaSkip; ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars. #ifdef DEBUG void* ivaSelf; #endif unsigned ivaVar; // Variable we are interested in, or -1 varRefKinds ivaMaskInd; // What kind of indirect assignments are there? callInterf ivaMaskCall; // What kind of calls are there? bool ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to. 
}; static callInterf optCallInterf(GenTreeCall* call); public: // VN based copy propagation. // In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for. // While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor, // for locals which will use "definitions from uses", it will not be, so we store it // in this class instead. class CopyPropSsaDef { LclSsaVarDsc* m_ssaDef; #ifdef DEBUG GenTree* m_defNode; #endif public: CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode) : m_ssaDef(ssaDef) #ifdef DEBUG , m_defNode(defNode) #endif { } LclSsaVarDsc* GetSsaDef() const { return m_ssaDef; } #ifdef DEBUG GenTree* GetDefNode() const { return m_defNode; } #endif }; typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap; // Copy propagation functions. void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optCopyPropPushDef(GenTreeOp* asg, GenTreeLclVarCommon* lclNode, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode); int optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2); void optVnCopyProp(); INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName)); /************************************************************************** * Early value propagation *************************************************************************/ struct SSAName { unsigned m_lvNum; unsigned m_ssaNum; SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum) { } static unsigned GetHashCode(SSAName ssaNm) { return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum); } static bool Equals(SSAName ssaNm1, SSAName ssaNm2) { return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum); } }; #define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array #define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type. #define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores. #define OMF_HAS_NULLCHECK 0x00000008 // Method contains null check. #define OMF_HAS_FATPOINTER 0x00000010 // Method contains call, that needs fat pointer transformation. #define OMF_HAS_OBJSTACKALLOC 0x00000020 // Method contains an object allocated on the stack. #define OMF_HAS_GUARDEDDEVIRT 0x00000040 // Method contains guarded devirtualization candidate #define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary. #define OMF_HAS_PATCHPOINT 0x00000100 // Method contains patchpoints #define OMF_NEEDS_GCPOLLS 0x00000200 // Method needs GC polls #define OMF_HAS_FROZEN_STRING 0x00000400 // Method has a frozen string (REF constant int), currently only on CoreRT. 
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints #define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000 // Method has potential tail call in a non BBJ_RETURN block bool doesMethodHaveFatPointer() { return (optMethodFlags & OMF_HAS_FATPOINTER) != 0; } void setMethodHasFatPointer() { optMethodFlags |= OMF_HAS_FATPOINTER; } void clearMethodHasFatPointer() { optMethodFlags &= ~OMF_HAS_FATPOINTER; } void addFatPointerCandidate(GenTreeCall* call); bool doesMethodHaveFrozenString() const { return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0; } void setMethodHasFrozenString() { optMethodFlags |= OMF_HAS_FROZEN_STRING; } bool doesMethodHaveGuardedDevirtualization() const { return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0; } void setMethodHasGuardedDevirtualization() { optMethodFlags |= OMF_HAS_GUARDEDDEVIRT; } void clearMethodHasGuardedDevirtualization() { optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT; } void considerGuardedDevirtualization(GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)); void addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood); bool doesMethodHaveExpRuntimeLookup() { return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0; } void setMethodHasExpRuntimeLookup() { optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP; } void clearMethodHasExpRuntimeLookup() { optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP; } void addExpRuntimeLookupCandidate(GenTreeCall* call); bool doesMethodHavePatchpoints() { return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0; } void setMethodHasPatchpoint() { optMethodFlags |= OMF_HAS_PATCHPOINT; } bool doesMethodHavePartialCompilationPatchpoints() { return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0; } void setMethodHasPartialCompilationPatchpoint() { optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT; } unsigned optMethodFlags; bool doesMethodHaveNoReturnCalls() { return optNoReturnCallCount > 0; } void setMethodHasNoReturnCalls() { optNoReturnCallCount++; } unsigned optNoReturnCallCount; // Recursion bound controls how far we can go backwards tracking for a SSA value. // No throughput diff was found with backward walk bound between 3-8. 
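    // A sketch of the backward walk this bounds (locals below are hypothetical): given
    //
    //     V01 = new int[16];
    //     ...
    //     t   = V01.Length;
    //
    // optPropGetValueRec follows V01's SSA definitions backwards, at most optEarlyPropRecurBound steps,
    // looking for the allocation so that the length read can be replaced with the constant 16.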
static const int optEarlyPropRecurBound = 5; enum class optPropKind { OPK_INVALID, OPK_ARRAYLEN, OPK_NULLCHECK }; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap; GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind); GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optDoEarlyPropForBlock(BasicBlock* block); bool optDoEarlyPropForFunc(); void optEarlyProp(); void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optIsNullCheckFoldingLegal(GenTree* tree, GenTree* nullCheckTree, GenTree** nullCheckParent, Statement** nullCheckStmt); bool optCanMoveNullCheckPastTree(GenTree* tree, unsigned nullCheckLclNum, bool isInsideTry, bool checkSideEffectSummary); #if DEBUG void optCheckFlagsAreSet(unsigned methodFlag, const char* methodFlagStr, unsigned bbFlag, const char* bbFlagStr, GenTree* tree, BasicBlock* basicBlock); #endif // Redundant branch opts // PhaseStatus optRedundantBranches(); bool optRedundantRelop(BasicBlock* const block); bool optRedundantBranch(BasicBlock* const block); bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); /************************************************************************** * Value/Assertion propagation *************************************************************************/ public: // Data structures for assertion prop BitVecTraits* apTraits; ASSERT_TP apFull; enum optAssertionKind { OAK_INVALID, OAK_EQUAL, OAK_NOT_EQUAL, OAK_SUBRANGE, OAK_NO_THROW, OAK_COUNT }; enum optOp1Kind { O1K_INVALID, O1K_LCLVAR, O1K_ARR_BND, O1K_BOUND_OPER_BND, O1K_BOUND_LOOP_BND, O1K_CONSTANT_LOOP_BND, O1K_CONSTANT_LOOP_BND_UN, O1K_EXACT_TYPE, O1K_SUBTYPE, O1K_VALUE_NUMBER, O1K_COUNT }; enum optOp2Kind { O2K_INVALID, O2K_LCLVAR_COPY, O2K_IND_CNS_INT, O2K_CONST_INT, O2K_CONST_LONG, O2K_CONST_DOUBLE, O2K_ZEROOBJ, O2K_SUBRANGE, O2K_COUNT }; struct AssertionDsc { optAssertionKind assertionKind; struct SsaVar { unsigned lclNum; // assigned to or property of this local var number unsigned ssaNum; }; struct ArrBnd { ValueNum vnIdx; ValueNum vnLen; }; struct AssertionDscOp1 { optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype ValueNum vn; union { SsaVar lcl; ArrBnd bnd; }; } op1; struct AssertionDscOp2 { optOp2Kind kind; // a const or copy assignment ValueNum vn; struct IntVal { ssize_t iconVal; // integer #if !defined(HOST_64BIT) unsigned padding; // unused; ensures iconFlags does not overlap lconVal #endif GenTreeFlags iconFlags; // gtFlags }; union { struct { SsaVar lcl; FieldSeqNode* zeroOffsetFieldSeq; }; IntVal u1; __int64 lconVal; double dconVal; IntegralRange u2; }; } op2; bool IsCheckedBoundArithBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND); } bool IsCheckedBoundBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND); } bool IsConstantBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND)); } 
bool IsConstantBoundUnsigned() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND_UN)); } bool IsBoundsCheckNoThrow() { return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND)); } bool IsCopyAssertion() { return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY)); } bool IsConstantInt32Assertion() { return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT); } static bool SameKind(AssertionDsc* a1, AssertionDsc* a2) { return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind && a1->op2.kind == a2->op2.kind; } static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2) { if (kind == OAK_EQUAL) { return kind2 == OAK_NOT_EQUAL; } else if (kind == OAK_NOT_EQUAL) { return kind2 == OAK_EQUAL; } return false; } bool HasSameOp1(AssertionDsc* that, bool vnBased) { if (op1.kind != that->op1.kind) { return false; } else if (op1.kind == O1K_ARR_BND) { assert(vnBased); return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen); } else { return ((vnBased && (op1.vn == that->op1.vn)) || (!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum))); } } bool HasSameOp2(AssertionDsc* that, bool vnBased) { if (op2.kind != that->op2.kind) { return false; } switch (op2.kind) { case O2K_IND_CNS_INT: case O2K_CONST_INT: return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags)); case O2K_CONST_LONG: return (op2.lconVal == that->op2.lconVal); case O2K_CONST_DOUBLE: // exact match because of positive and negative zero. return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0); case O2K_ZEROOBJ: return true; case O2K_LCLVAR_COPY: return (op2.lcl.lclNum == that->op2.lcl.lclNum) && (!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) && (op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq); case O2K_SUBRANGE: return op2.u2.Equals(that->op2.u2); case O2K_INVALID: // we will return false break; default: assert(!"Unexpected value for op2.kind in AssertionDsc."); break; } return false; } bool Complementary(AssertionDsc* that, bool vnBased) { return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } bool Equals(AssertionDsc* that, bool vnBased) { if (assertionKind != that->assertionKind) { return false; } else if (assertionKind == OAK_NO_THROW) { assert(op2.kind == O2K_INVALID); return HasSameOp1(that, vnBased); } else { return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } } }; protected: static fgWalkPreFn optAddCopiesCallback; static fgWalkPreFn optVNAssertionPropCurStmtVisitor; unsigned optAddCopyLclNum; GenTree* optAddCopyAsgnNode; bool optLocalAssertionProp; // indicates that we are performing local assertion prop bool optAssertionPropagated; // set to true if we modified the trees bool optAssertionPropagatedCurrentStmt; #ifdef DEBUG GenTree* optAssertionPropCurrentTree; #endif AssertionIndex* optComplementaryAssertionMap; JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions // using the value of a local var) for each local var AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments AssertionIndex optAssertionCount; // total number of assertions in the assertion table AssertionIndex optMaxAssertionCount; public: void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); 
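    // VN-based constant propagation helpers. Illustrative example (not code from this file): when value
    // numbering proves that the relop feeding a JTRUE is a constant, optVNConstantPropOnJTrue can replace
    // the condition with that constant (preserving any side effects), after which later phases fold the
    // branch, e.g.
    //
    //     if (5 < 10) { ... }   ==>   if (1) { ... }   // branch subsequently folded
    //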
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test); GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree); GenTree* optExtractSideEffListFromConst(GenTree* tree); AssertionIndex GetAssertionCount() { return optAssertionCount; } ASSERT_TP* bbJtrueAssertionOut; typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap; ValueNumToAssertsMap* optValueNumToAsserts; // Assertion prop helpers. ASSERT_TP& GetAssertionDep(unsigned lclNum); AssertionDsc* optGetAssertion(AssertionIndex assertIndex); void optAssertionInit(bool isLocalProp); void optAssertionTraitsInit(AssertionIndex assertionCount); void optAssertionReset(AssertionIndex limit); void optAssertionRemove(AssertionIndex index); // Assertion prop data flow functions. void optAssertionPropMain(); Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt); bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags); ASSERT_TP* optInitAssertionDataflowFlags(); ASSERT_TP* optComputeAssertionGen(); // Assertion Gen functions. void optAssertionGen(GenTree* tree); AssertionIndex optAssertionGenCast(GenTreeCast* cast); AssertionIndex optAssertionGenPhiDefn(GenTree* tree); AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree); AssertionInfo optAssertionGenJtrue(GenTree* tree); AssertionIndex optCreateJtrueAssertions(GenTree* op1, GenTree* op2, Compiler::optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFindComplementary(AssertionIndex assertionIndex); void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index); // Assertion creation functions. AssertionIndex optCreateAssertion(GenTree* op1, GenTree* op2, optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion); bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange); void optCreateComplementaryAssertion(AssertionIndex assertionIndex, GenTree* op1, GenTree* op2, bool helperCallArgs = false); bool optAssertionVnInvolvesNan(AssertionDsc* assertion); AssertionIndex optAddAssertion(AssertionDsc* assertion); void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index); #ifdef DEBUG void optPrintVnAssertionMapping(); #endif ASSERT_TP optGetVnMappedAssertions(ValueNum vn); // Used for respective assertion propagations. AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)); bool optAssertionIsNonNull(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)); AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2); AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1); AssertionIndex optLocalAssertionIsEqualOrNotEqual( optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions); // Assertion prop for lcl var functions. 
bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc); GenTree* optCopyAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); GenTree* optConstantAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions); // Assertion propagation functions. GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block); GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt); GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt); GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt); GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt); GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt); GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt); GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call); // Implied assertion functions. 
void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions); void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions); void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result); void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result); #ifdef DEBUG void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0); void optPrintAssertionIndex(AssertionIndex index); void optPrintAssertionIndices(ASSERT_TP assertions); void optDebugCheckAssertion(AssertionDsc* assertion); void optDebugCheckAssertions(AssertionIndex AssertionIndex); #endif static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr); static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr); void optAddCopies(); /************************************************************************** * Range checks *************************************************************************/ public: struct LoopCloneVisitorInfo { LoopCloneContext* context; unsigned loopNum; Statement* stmt; LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt) : context(context), loopNum(loopNum), stmt(nullptr) { } }; bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum); bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context); static fgWalkPreFn optCanOptimizeByLoopCloningVisitor; fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info); bool optObtainLoopCloningOpts(LoopCloneContext* context); bool optIsLoopClonable(unsigned loopInd); bool optLoopCloningEnabled(); #ifdef DEBUG void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore); #endif void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath)); bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context); bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context); BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context, unsigned loopNum, BasicBlock* slowHead, BasicBlock* insertAfter); protected: ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk)); bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB); protected: bool optLoopsMarked; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX RegAlloc XX XX XX XX Does the register allocation and puts the remaining lclVars on the stack XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc); void raMarkStkVars(); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(TARGET_AMD64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); return (type == TYP_SIMD32); } #elif defined(TARGET_ARM64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); // ARM64 ABI FP Callee save registers only require Callee to save lower 8 Bytes // For SIMD types longer than 8 bytes Caller is responsible for saving and restoring 
Upper bytes. return ((type == TYP_SIMD16) || (type == TYP_SIMD12)); } #else // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #error("Unknown target architecture for FEATURE_SIMD") #endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE protected: // Some things are used by both LSRA and regpredict allocators. FrameType rpFrameType; bool rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason)); private: Lowering* m_pLowering; // Lowering; needed to Lower IR that's added or modified after Lowering. LinearScanInterface* m_pLinearScan; // Linear Scan allocator /* raIsVarargsStackArg is called by raMaskStkVars and by lvaComputeRefCounts. It identifies the special case where a varargs function has a parameter passed on the stack, other than the special varargs handle. Such parameters require special treatment, because they cannot be tracked by the GC (their offsets in the stack are not known at compile time). */ bool raIsVarargsStackArg(unsigned lclNum) { #ifdef TARGET_X86 LclVarDsc* varDsc = lvaGetDesc(lclNum); assert(varDsc->lvIsParam); return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg)); #else // TARGET_X86 return false; #endif // TARGET_X86 } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX EEInterface XX XX XX XX Get to the class and method info from the Execution Engine given XX XX tokens for the class and method XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Get handles void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedToken, CORINFO_CALLINFO_FLAGS flags, CORINFO_CALL_INFO* pResult); void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS flags, CORINFO_FIELD_INFO* pResult); // Get the flags bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd); bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn); bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd); var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr); #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS) const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className); const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd); unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle); bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method); CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method); #endif var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned); CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list); CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context); unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig); static unsigned eeGetArgAlignment(var_types type, bool isFloatHfa); // VOM info, method sigs void eeGetSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, CORINFO_SIG_INFO* retSig); void eeGetCallSiteSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, 
CORINFO_SIG_INFO* retSig); void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr); // Method entry-points, instrs CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method); CORINFO_EE_INFO eeInfo; bool eeInfoInitialized; CORINFO_EE_INFO* eeGetEEInfo(); // Gets the offset of a SDArray's first element static unsigned eeGetArrayDataOffset(); // Get the offset of a MDArray's first element static unsigned eeGetMDArrayDataOffset(unsigned rank); // Get the offset of a MDArray's dimension length for a given dimension. static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension); // Get the offset of a MDArray's lower bound for a given dimension. static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension); GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig); // Returns the page size for the target machine as reported by the EE. target_size_t eeGetPageSize() { return (target_size_t)eeGetEEInfo()->osPageSize; } //------------------------------------------------------------------------ // VirtualStubParam: virtual stub dispatch extra parameter (slot address). // // It represents Abi and target specific registers for the parameter. // class VirtualStubParamInfo { public: VirtualStubParamInfo(bool isCoreRTABI) { #if defined(TARGET_X86) reg = REG_EAX; regMask = RBM_EAX; #elif defined(TARGET_AMD64) if (isCoreRTABI) { reg = REG_R10; regMask = RBM_R10; } else { reg = REG_R11; regMask = RBM_R11; } #elif defined(TARGET_ARM) if (isCoreRTABI) { reg = REG_R12; regMask = RBM_R12; } else { reg = REG_R4; regMask = RBM_R4; } #elif defined(TARGET_ARM64) reg = REG_R11; regMask = RBM_R11; #else #error Unsupported or unset target architecture #endif } regNumber GetReg() const { return reg; } _regMask_enum GetRegMask() const { return regMask; } private: regNumber reg; _regMask_enum regMask; }; VirtualStubParamInfo* virtualStubParamInfo; bool IsTargetAbi(CORINFO_RUNTIME_ABI abi) { return eeGetEEInfo()->targetAbi == abi; } bool generateCFIUnwindCodes() { #if defined(FEATURE_CFI_SUPPORT) return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI); #else return false; #endif } // Debugging support - Line number info void eeGetStmtOffsets(); unsigned eeBoundariesCount; ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE void eeSetLIcount(unsigned count); void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc); void eeSetLIdone(); #ifdef DEBUG static void eeDispILOffs(IL_OFFSET offs); static void eeDispSourceMappingOffs(uint32_t offs); static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line); void eeDispLineInfos(); #endif // DEBUG // Debugging support - Local var info void eeGetVars(); unsigned eeVarsCount; struct VarResultInfo { UNATIVE_OFFSET startOffset; UNATIVE_OFFSET endOffset; DWORD varNumber; CodeGenInterface::siVarLoc loc; } * eeVars; void eeSetLVcount(unsigned count); void eeSetLVinfo(unsigned which, UNATIVE_OFFSET startOffs, UNATIVE_OFFSET length, unsigned varNum, const CodeGenInterface::siVarLoc& loc); void eeSetLVdone(); #ifdef DEBUG void eeDispVar(ICorDebugInfo::NativeVarInfo* var); void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars); #endif // DEBUG // ICorJitInfo wrappers void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize); void eeAllocUnwindInfo(BYTE* pHotCode, BYTE* pColdCode, ULONG startOffset, ULONG endOffset, ULONG unwindSize, BYTE* pUnwindBlock, 
CorJitFuncKind funcKind); void eeSetEHcount(unsigned cEH); void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause); WORD eeGetRelocTypeHint(void* target); // ICorStaticInfo wrapper functions bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken); #if defined(UNIX_AMD64_ABI) #ifdef DEBUG static void dumpSystemVClassificationType(SystemVClassificationType ct); #endif // DEBUG void eeGetSystemVAmd64PassStructInRegisterDescriptor( /*IN*/ CORINFO_CLASS_HANDLE structHnd, /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr); #endif // UNIX_AMD64_ABI template <typename ParamType> bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithErrorTrapImp(void (*function)(void*), void* param); template <typename ParamType> bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param); // Utility functions const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr); #if defined(DEBUG) const WCHAR* eeGetCPString(size_t stringHandle); #endif const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd); static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper); static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method); static bool IsSharedStaticHelper(GenTree* tree); static bool IsGcSafePoint(GenTreeCall* call); static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs); // returns true/false if 'field' is a Jit Data offset static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field); // returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB) static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field); /*****************************************************************************/ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX CodeGenerator XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: CodeGenInterface* codeGen; // Record the instr offset mapping to the generated code jitstd::list<IPmappingDsc> genIPmappings; #ifdef DEBUG jitstd::list<PreciseIPMapping> genPreciseIPmappings; #endif // Managed RetVal - A side hash table meant to record the mapping from a // GT_CALL node to its debug info. This info is used to emit sequence points // that can be used by debugger to determine the native offset at which the // managed RetVal will be available. // // In fact we can store debug info in a GT_CALL node. This was ruled out in // favor of a side table for two reasons: 1) We need debug info for only those // GT_CALL nodes (created during importation) that correspond to an IL call and // whose return type is other than TYP_VOID. 2) GT_CALL node is a frequently used // structure and IL offset is needed only when generating debuggable code. Therefore // it is desirable to avoid memory size penalty in retail scenarios. 
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable; CallSiteDebugInfoTable* genCallSite2DebugInfoMap; unsigned genReturnLocal; // Local number for the return value when applicable. BasicBlock* genReturnBB; // jumped to when not optimizing for speed. // The following properties are part of CodeGenContext. Getters are provided here for // convenience and backward compatibility, but the properties can only be set by invoking // the setter on CodeGenContext directly. emitter* GetEmitter() const { return codeGen->GetEmitter(); } bool isFramePointerUsed() const { return codeGen->isFramePointerUsed(); } bool GetInterruptible() { return codeGen->GetInterruptible(); } void SetInterruptible(bool value) { codeGen->SetInterruptible(value); } #if DOUBLE_ALIGN const bool genDoubleAlign() { return codeGen->doDoubleAlign(); } DWORD getCanDoubleAlign(); bool shouldDoubleAlign(unsigned refCntStk, unsigned refCntReg, weight_t refCntWtdReg, unsigned refCntStkParam, weight_t refCntWtdStkDbl); #endif // DOUBLE_ALIGN bool IsFullPtrRegMapRequired() { return codeGen->IsFullPtrRegMapRequired(); } void SetFullPtrRegMapRequired(bool value) { codeGen->SetFullPtrRegMapRequired(value); } // Things that MAY belong either in CodeGen or CodeGenContext #if defined(FEATURE_EH_FUNCLETS) FuncInfoDsc* compFuncInfos; unsigned short compCurrFuncIdx; unsigned short compFuncInfoCount; unsigned short compFuncCount() { assert(fgFuncletsCreated); return compFuncInfoCount; } #else // !FEATURE_EH_FUNCLETS // This is a no-op when there are no funclets! void genUpdateCurrentFunclet(BasicBlock* block) { return; } FuncInfoDsc compFuncInfoRoot; static const unsigned compCurrFuncIdx = 0; unsigned short compFuncCount() { return 1; } #endif // !FEATURE_EH_FUNCLETS FuncInfoDsc* funCurrentFunc(); void funSetCurrentFunc(unsigned funcIdx); FuncInfoDsc* funGetFunc(unsigned funcIdx); unsigned int funGetFuncIdx(BasicBlock* block); // LIVENESS VARSET_TP compCurLife; // current live variables GenTree* compCurLifeTree; // node after which compCurLife has been computed // Compare the given "newLife" with last set of live variables and update // codeGen "gcInfo", siScopes, "regSet" with the new variable's homes/liveness. template <bool ForCodeGen> void compChangeLife(VARSET_VALARG_TP newLife); // Update the GC's masks, register's masks and reports change on variable's homes given a set of // current live variables if changes have happened since "compCurLife". template <bool ForCodeGen> inline void compUpdateLife(VARSET_VALARG_TP newLife); // Gets a register mask that represent the kill set for a helper call since // not all JIT Helper calls follow the standard ABI on the target architecture. regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper); #ifdef TARGET_ARM // Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at // "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the // struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" -- // i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and // a double, and we at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask. 
void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask); #endif // TARGET_ARM // If "tree" is a indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR // node, else NULL. static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree); // This map is indexed by GT_OBJ nodes that are address of promoted struct variables, which // have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this // table, one may assume that all the (tracked) field vars die at this GT_OBJ. Otherwise, // the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field // vars of the promoted struct local that go dead at the given node (the set bits are the bits // for the tracked var indices of the field vars, as in a live var set). // // The map is allocated on demand so all map operations should use one of the following three // wrapper methods. NodeToVarsetPtrMap* m_promotedStructDeathVars; NodeToVarsetPtrMap* GetPromotedStructDeathVars() { if (m_promotedStructDeathVars == nullptr) { m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator()); } return m_promotedStructDeathVars; } void ClearPromotedStructDeathVars() { if (m_promotedStructDeathVars != nullptr) { m_promotedStructDeathVars->RemoveAll(); } } bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits) { *bits = nullptr; bool result = false; if (m_promotedStructDeathVars != nullptr) { result = m_promotedStructDeathVars->Lookup(tree, bits); } return result; } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX UnwindInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #if !defined(__GNUC__) #pragma region Unwind information #endif public: // // Infrastructure functions: start/stop/reserve/emit. // void unwindBegProlog(); void unwindEndProlog(); void unwindBegEpilog(); void unwindEndEpilog(); void unwindReserve(); void unwindEmit(void* pHotCode, void* pColdCode); // // Specific unwind information functions: called by code generation to indicate a particular // prolog or epilog unwindable instruction has been generated. // void unwindPush(regNumber reg); void unwindAllocStack(unsigned size); void unwindSetFrameReg(regNumber reg, unsigned offset); void unwindSaveReg(regNumber reg, unsigned offset); #if defined(TARGET_ARM) void unwindPushMaskInt(regMaskTP mask); void unwindPushMaskFloat(regMaskTP mask); void unwindPopMaskInt(regMaskTP mask); void unwindPopMaskFloat(regMaskTP mask); void unwindBranch16(); // The epilog terminates with a 16-bit branch (e.g., "bx lr") void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only // called via unwindPadding(). void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last // instruction and the current location. #endif // TARGET_ARM #if defined(TARGET_ARM64) void unwindNop(); void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last // instruction and the current location. void unwindSaveReg(regNumber reg, int offset); // str reg, [sp, #offset] void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]! 
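    // Illustrative sketch (not code from this file): during prolog generation, ARM64
    // codegen reports each unwindable prolog instruction through the matching unwind
    // call declared here, e.g.
    //
    //     // sub sp, sp, #0x40
    //     unwindAllocStack(0x40);
    //     // str fp, [sp, #-16]!
    //     unwindSaveRegPreindexed(REG_FP, -16);
    //
    // The concrete instruction/offset pairing above is hypothetical; the real call
    // sites live in the ARM64 codegen and epilog/prolog emitters, not in this header.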
void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset] void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]! void unwindSaveNext(); // unwind code: save_next void unwindReturn(regNumber reg); // ret lr #endif // defined(TARGET_ARM64) // // Private "helper" functions for the unwind implementation. // private: #if defined(FEATURE_EH_FUNCLETS) void unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc); #endif // FEATURE_EH_FUNCLETS void unwindReserveFunc(FuncInfoDsc* func); void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS)) void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode); void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode); #endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS) UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func); #if defined(TARGET_AMD64) void unwindBegPrologWindows(); void unwindPushWindows(regNumber reg); void unwindAllocStackWindows(unsigned size); void unwindSetFrameRegWindows(regNumber reg, unsigned offset); void unwindSaveRegWindows(regNumber reg, unsigned offset); #ifdef UNIX_AMD64_ABI void unwindSaveRegCFI(regNumber reg, unsigned offset); #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM) void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16); void unwindPushPopMaskFloat(regMaskTP mask); #endif // TARGET_ARM #if defined(FEATURE_CFI_SUPPORT) short mapRegNumToDwarfReg(regNumber reg); void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); void unwindPushPopCFI(regNumber reg); void unwindBegPrologCFI(); void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat); void unwindAllocStackCFI(unsigned size); void unwindSetFrameRegCFI(regNumber reg, unsigned offset); void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #ifdef DEBUG void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, DWORD cfiCodeBytes, const CFI_CODE* const pCfiCode); #endif #endif // FEATURE_CFI_SUPPORT #if !defined(__GNUC__) #pragma endregion // Note: region is NOT under !defined(__GNUC__) #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX SIMD XX XX XX XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX XX that contains the distinguished, well-known SIMD type definitions). 
XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ bool IsBaselineSimdIsaSupported() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compOpportunisticallyDependsOn(minimumIsa); #else return false; #endif } #if defined(DEBUG) bool IsBaselineSimdIsaSupportedDebugOnly() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compIsaSupportedDebugOnly(minimumIsa); #else return false; #endif // FEATURE_SIMD } #endif // DEBUG // Get highest available level for SIMD codegen SIMDLevel getSIMDSupportLevel() { #if defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX2)) { return SIMD_AVX2_Supported; } if (compOpportunisticallyDependsOn(InstructionSet_SSE42)) { return SIMD_SSE4_Supported; } // min bar is SSE2 return SIMD_SSE2_Supported; #else assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch"); unreached(); return SIMD_Not_Supported; #endif } bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd) { return info.compCompHnd->isIntrinsicType(clsHnd); } const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName) { return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName); } CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index) { return info.compCompHnd->getTypeInstantiationArgument(cls, index); } #ifdef FEATURE_SIMD // Should we support SIMD intrinsics? bool featureSIMD; // Should we recognize SIMD types? // We always do this on ARM64 to support HVA types. bool supportSIMDTypes() { #ifdef TARGET_ARM64 return true; #else return featureSIMD; #endif } // Have we identified any SIMD types? // This is currently used by struct promotion to avoid getting type information for a struct // field to see if it is a SIMD type, if we haven't seen any SIMD types or operations in // the method. bool _usesSIMDTypes; bool usesSIMDTypes() { return _usesSIMDTypes; } void setUsesSIMDTypes(bool value) { _usesSIMDTypes = value; } // This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics // that require indexed access to the individual fields of the vector, which is not well supported // by the hardware. It is allocated when/if such situations are encountered during Lowering. 
unsigned lvaSIMDInitTempVarNum; struct SIMDHandlesCache { // SIMD Types CORINFO_CLASS_HANDLE SIMDFloatHandle; CORINFO_CLASS_HANDLE SIMDDoubleHandle; CORINFO_CLASS_HANDLE SIMDIntHandle; CORINFO_CLASS_HANDLE SIMDUShortHandle; CORINFO_CLASS_HANDLE SIMDUByteHandle; CORINFO_CLASS_HANDLE SIMDShortHandle; CORINFO_CLASS_HANDLE SIMDByteHandle; CORINFO_CLASS_HANDLE SIMDLongHandle; CORINFO_CLASS_HANDLE SIMDUIntHandle; CORINFO_CLASS_HANDLE SIMDULongHandle; CORINFO_CLASS_HANDLE SIMDNIntHandle; CORINFO_CLASS_HANDLE SIMDNUIntHandle; CORINFO_CLASS_HANDLE SIMDVector2Handle; CORINFO_CLASS_HANDLE SIMDVector3Handle; CORINFO_CLASS_HANDLE SIMDVector4Handle; CORINFO_CLASS_HANDLE SIMDVectorHandle; #ifdef FEATURE_HW_INTRINSICS #if defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector64FloatHandle; CORINFO_CLASS_HANDLE Vector64DoubleHandle; CORINFO_CLASS_HANDLE Vector64IntHandle; CORINFO_CLASS_HANDLE Vector64UShortHandle; CORINFO_CLASS_HANDLE Vector64UByteHandle; CORINFO_CLASS_HANDLE Vector64ShortHandle; CORINFO_CLASS_HANDLE Vector64ByteHandle; CORINFO_CLASS_HANDLE Vector64LongHandle; CORINFO_CLASS_HANDLE Vector64UIntHandle; CORINFO_CLASS_HANDLE Vector64ULongHandle; CORINFO_CLASS_HANDLE Vector64NIntHandle; CORINFO_CLASS_HANDLE Vector64NUIntHandle; #endif // defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector128FloatHandle; CORINFO_CLASS_HANDLE Vector128DoubleHandle; CORINFO_CLASS_HANDLE Vector128IntHandle; CORINFO_CLASS_HANDLE Vector128UShortHandle; CORINFO_CLASS_HANDLE Vector128UByteHandle; CORINFO_CLASS_HANDLE Vector128ShortHandle; CORINFO_CLASS_HANDLE Vector128ByteHandle; CORINFO_CLASS_HANDLE Vector128LongHandle; CORINFO_CLASS_HANDLE Vector128UIntHandle; CORINFO_CLASS_HANDLE Vector128ULongHandle; CORINFO_CLASS_HANDLE Vector128NIntHandle; CORINFO_CLASS_HANDLE Vector128NUIntHandle; #if defined(TARGET_XARCH) CORINFO_CLASS_HANDLE Vector256FloatHandle; CORINFO_CLASS_HANDLE Vector256DoubleHandle; CORINFO_CLASS_HANDLE Vector256IntHandle; CORINFO_CLASS_HANDLE Vector256UShortHandle; CORINFO_CLASS_HANDLE Vector256UByteHandle; CORINFO_CLASS_HANDLE Vector256ShortHandle; CORINFO_CLASS_HANDLE Vector256ByteHandle; CORINFO_CLASS_HANDLE Vector256LongHandle; CORINFO_CLASS_HANDLE Vector256UIntHandle; CORINFO_CLASS_HANDLE Vector256ULongHandle; CORINFO_CLASS_HANDLE Vector256NIntHandle; CORINFO_CLASS_HANDLE Vector256NUIntHandle; #endif // defined(TARGET_XARCH) #endif // FEATURE_HW_INTRINSICS SIMDHandlesCache() { memset(this, 0, sizeof(*this)); } }; SIMDHandlesCache* m_simdHandleCache; // Get an appropriate "zero" for the given type and class handle. GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle); // Get the handle for a SIMD type. CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType) { if (m_simdHandleCache == nullptr) { // This may happen if the JIT generates SIMD node on its own, without importing them. // Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache. 
return NO_CLASS_HANDLE; } if (simdBaseJitType == CORINFO_TYPE_FLOAT) { switch (simdType) { case TYP_SIMD8: return m_simdHandleCache->SIMDVector2Handle; case TYP_SIMD12: return m_simdHandleCache->SIMDVector3Handle; case TYP_SIMD16: if ((getSIMDVectorType() == TYP_SIMD32) || (m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE)) { return m_simdHandleCache->SIMDVector4Handle; } break; case TYP_SIMD32: break; default: unreached(); } } assert(emitTypeSize(simdType) <= largestEnregisterableStructSize()); switch (simdBaseJitType) { case CORINFO_TYPE_FLOAT: return m_simdHandleCache->SIMDFloatHandle; case CORINFO_TYPE_DOUBLE: return m_simdHandleCache->SIMDDoubleHandle; case CORINFO_TYPE_INT: return m_simdHandleCache->SIMDIntHandle; case CORINFO_TYPE_USHORT: return m_simdHandleCache->SIMDUShortHandle; case CORINFO_TYPE_UBYTE: return m_simdHandleCache->SIMDUByteHandle; case CORINFO_TYPE_SHORT: return m_simdHandleCache->SIMDShortHandle; case CORINFO_TYPE_BYTE: return m_simdHandleCache->SIMDByteHandle; case CORINFO_TYPE_LONG: return m_simdHandleCache->SIMDLongHandle; case CORINFO_TYPE_UINT: return m_simdHandleCache->SIMDUIntHandle; case CORINFO_TYPE_ULONG: return m_simdHandleCache->SIMDULongHandle; case CORINFO_TYPE_NATIVEINT: return m_simdHandleCache->SIMDNIntHandle; case CORINFO_TYPE_NATIVEUINT: return m_simdHandleCache->SIMDNUIntHandle; default: assert(!"Didn't find a class handle for simdType"); } return NO_CLASS_HANDLE; } // Returns true if this is a SIMD type that should be considered an opaque // vector type (i.e. do not analyze or promote its fields). // Note that all but the fixed vector types are opaque, even though they may // actually be declared as having fields. bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const { return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) && (structHandle != m_simdHandleCache->SIMDVector3Handle) && (structHandle != m_simdHandleCache->SIMDVector4Handle)); } // Returns true if the tree corresponds to a TYP_SIMD lcl var. // Note that both SIMD vector args and locals are mared as lvSIMDType = true, but // type of an arg node is TYP_BYREF and a local node is TYP_SIMD or TYP_STRUCT. bool isSIMDTypeLocal(GenTree* tree) { return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType; } // Returns true if the lclVar is an opaque SIMD type. bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const { if (!varDsc->lvSIMDType) { return false; } return isOpaqueSIMDType(varDsc->GetStructHnd()); } static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId) { return (intrinsicId == SIMDIntrinsicEqual); } // Returns base JIT type of a TYP_SIMD local. // Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD. 
CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree) { if (isSIMDTypeLocal(tree)) { return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType(); } return CORINFO_TYPE_UNDEF; } bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Numerics") == 0; } return false; } bool isSIMDClass(typeInfo* pTypeInfo) { return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass()); } bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { #ifdef FEATURE_HW_INTRINSICS if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0; } #endif // FEATURE_HW_INTRINSICS return false; } bool isHWSIMDClass(typeInfo* pTypeInfo) { #ifdef FEATURE_HW_INTRINSICS return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass()); #else return false; #endif } bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd); } bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo) { return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo); } // Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF // if it is not a SIMD type or is an unsupported base JIT type. CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr); CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd) { return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr); } // Get SIMD Intrinsic info given the method handle. // Also sets typeHnd, argCount, baseType and sizeBytes out params. const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd, CORINFO_METHOD_HANDLE methodHnd, CORINFO_SIG_INFO* sig, bool isNewObj, unsigned* argCount, CorInfoType* simdBaseJitType, unsigned* sizeBytes); // Pops and returns GenTree node from importers type stack. // Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes. GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr); // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain given relop result. SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId, CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, CorInfoType* inOutBaseJitType, GenTree** op1, GenTree** op2); #if defined(TARGET_XARCH) // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain == comparison result. SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, GenTree** op1, GenTree** op2); #endif // defined(TARGET_XARCH) void setLclRelatedToSIMDIntrinsic(GenTree* tree); bool areFieldsContiguous(GenTree* op1, GenTree* op2); bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second); bool areArrayElementsContiguous(GenTree* op1, GenTree* op2); bool areArgumentsContiguous(GenTree* op1, GenTree* op2); GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize); // check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT. 
GenTree* impSIMDIntrinsic(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef); GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd); // Whether SIMD vector occupies part of SIMD register. // SSE2: vector2f/3f are considered sub register SIMD types. // AVX: vector2f, 3f and 4f are all considered sub register SIMD types. bool isSubRegisterSIMDType(GenTreeSIMD* simdNode) { unsigned vectorRegisterByteLength; #if defined(TARGET_XARCH) // Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded // with the AOT compiler, so that it cannot change from aot compilation time to runtime // This api does not require such fixing as it merely pertains to the size of the simd type // relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here // does not preclude the code from being used on a machine with a larger vector length.) if (getSIMDSupportLevel() < SIMD_AVX2_Supported) { vectorRegisterByteLength = 16; } else { vectorRegisterByteLength = 32; } #else vectorRegisterByteLength = getSIMDVectorRegisterByteLength(); #endif return (simdNode->GetSimdSize() < vectorRegisterByteLength); } // Get the type for the hardware SIMD vector. // This is the maximum SIMD type supported for this target. var_types getSIMDVectorType() { #if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return TYP_SIMD32; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return TYP_SIMD16; } #elif defined(TARGET_ARM64) return TYP_SIMD16; #else assert(!"getSIMDVectorType() unimplemented on target arch"); unreached(); #endif } // Get the size of the SIMD type in bytes int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd) { unsigned sizeBytes = 0; (void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes); return sizeBytes; } // Get the the number of elements of baseType of SIMD vector given by its size and baseType static int getSIMDVectorLength(unsigned simdSize, var_types baseType); // Get the the number of elements of baseType of SIMD vector given by its type handle int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd); // Get preferred alignment of SIMD type. int getSIMDTypeAlignment(var_types simdType); // Get the number of bytes in a System.Numeric.Vector<T> for the current compilation. // Note - cannot be used for System.Runtime.Intrinsic unsigned getSIMDVectorRegisterByteLength() { #if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return YMM_REGSIZE_BYTES; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return XMM_REGSIZE_BYTES; } #elif defined(TARGET_ARM64) return FP_REGSIZE_BYTES; #else assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch"); unreached(); #endif } // The minimum and maximum possible number of bytes in a SIMD vector. 
// maxSIMDStructBytes // The minimum SIMD size supported by System.Numeric.Vectors or System.Runtime.Intrinsic // SSE: 16-byte Vector<T> and Vector128<T> // AVX: 32-byte Vector256<T> (Vector<T> is 16-byte) // AVX2: 32-byte Vector<T> and Vector256<T> unsigned int maxSIMDStructBytes() { #if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX)) { return YMM_REGSIZE_BYTES; } else { // Verify and record that AVX2 isn't supported compVerifyInstructionSetUnusable(InstructionSet_AVX2); assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return XMM_REGSIZE_BYTES; } #else return getSIMDVectorRegisterByteLength(); #endif } unsigned int minSIMDStructBytes() { return emitTypeSize(TYP_SIMD8); } public: // Returns the codegen type for a given SIMD size. static var_types getSIMDTypeForSize(unsigned size) { var_types simdType = TYP_UNDEF; if (size == 8) { simdType = TYP_SIMD8; } else if (size == 12) { simdType = TYP_SIMD12; } else if (size == 16) { simdType = TYP_SIMD16; } else if (size == 32) { simdType = TYP_SIMD32; } else { noway_assert(!"Unexpected size for SIMD type"); } return simdType; } private: unsigned getSIMDInitTempVarNum(var_types simdType); #else // !FEATURE_SIMD bool isOpaqueSIMDLclVar(LclVarDsc* varDsc) { return false; } #endif // FEATURE_SIMD public: //------------------------------------------------------------------------ // largestEnregisterableStruct: The size in bytes of the largest struct that can be enregistered. // // Notes: It is not guaranteed that the struct of this size or smaller WILL be a // candidate for enregistration. unsigned largestEnregisterableStructSize() { #ifdef FEATURE_SIMD #if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) if (opts.IsReadyToRun()) { // Return constant instead of maxSIMDStructBytes, as maxSIMDStructBytes performs // checks that are effected by the current level of instruction set support would // otherwise cause the highest level of instruction set support to be reported to crossgen2. // and this api is only ever used as an optimization or assert, so no reporting should // ever happen. return YMM_REGSIZE_BYTES; } #endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) unsigned vectorRegSize = maxSIMDStructBytes(); assert(vectorRegSize >= TARGET_POINTER_SIZE); return vectorRegSize; #else // !FEATURE_SIMD return TARGET_POINTER_SIZE; #endif // !FEATURE_SIMD } // Use to determine if a struct *might* be a SIMD type. As this function only takes a size, many // structs will fit the criteria. bool structSizeMightRepresentSIMDType(size_t structSize) { #ifdef FEATURE_SIMD // Do not use maxSIMDStructBytes as that api in R2R on X86 and X64 may notify the JIT // about the size of a struct under the assumption that the struct size needs to be recorded. // By using largestEnregisterableStructSize here, the detail of whether or not Vector256<T> is // enregistered or not will not be messaged to the R2R compiler. return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize()); #else return false; #endif // FEATURE_SIMD } #ifdef FEATURE_SIMD static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId); #endif // !FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID); #endif // FEATURE_HW_INTRINSICS private: // These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType() // is defined for both FEATURE_SIMD and !FEATURE_SIMD apropriately. 
The use // of these routines also avoids the need for #ifdef FEATURE_SIMD specific code. // Is this var of type simd struct? bool lclVarIsSIMDType(unsigned varNum) { return lvaGetDesc(varNum)->lvIsSIMDType(); } // Is this Local node a SIMD local? bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree) { return lclVarIsSIMDType(lclVarTree->GetLclNum()); } // Returns true if the TYP_SIMD locals on stack are aligned at their // preferred byte boundary specified by getSIMDTypeAlignment(). // // As per the Intel manual, the preferred alignment for AVX vectors is // 32-bytes. It is not clear whether additional stack space used in // aligning the stack is worth the benefit and for now we will use 16-byte // alignment for AVX 256-bit vectors with unaligned load/stores to/from // memory. On x86, the stack frame is aligned to 4 bytes. We need to extend // existing support for double (8-byte) alignment to 16 or 32 byte // alignment for frames with local SIMD vars, if that is determined to be // profitable. // // On Amd64 and SysV, RSP+8 is aligned on entry to the function (before // prolog has run). This means that in RBP-based frames RBP will be 16-byte // aligned. For RSP-based frames these are only sometimes aligned, depending // on the frame size. // bool isSIMDTypeLocalAligned(unsigned varNum) { #if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF) { // TODO-Cleanup: Can't this use the lvExactSize on the varDsc? int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType); if (alignment <= STACK_ALIGN) { bool rbpBased; int off = lvaFrameAddress(varNum, &rbpBased); // On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the // first instruction of a function. If our frame is RBP based // then RBP will always be 16 bytes aligned, so we can simply // check the offset. if (rbpBased) { return (off % alignment) == 0; } // For RSP-based frames the alignment of RSP depends on our // locals. rsp+8 is aligned on entry and we just subtract frame // size so it is not hard to compute. Note that the compiler // tries hard to make sure the frame size means RSP will be // 16-byte aligned, but for leaf functions without locals (i.e. // frameSize = 0) it will not be. int frameSize = codeGen->genTotalFrameSize(); return ((8 - frameSize + off) % alignment) == 0; } } #endif // FEATURE_SIMD return false; } #ifdef DEBUG // Answer the question: Is a particular ISA supported? // Use this api when asking the question so that future // ISA questions can be asked correctly or when asserting // support/nonsupport for an instruction set bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const { #if defined(TARGET_XARCH) || defined(TARGET_ARM64) return (opts.compSupportsISA & (1ULL << isa)) != 0; #else return false; #endif } #endif // DEBUG bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const; // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
// The result of this api call will exactly match the target machine // on which the function is executed (except for CoreLib, where there are special rules) bool compExactlyDependsOn(CORINFO_InstructionSet isa) const { #if defined(TARGET_XARCH) || defined(TARGET_ARM64) uint64_t isaBit = (1ULL << isa); if ((opts.compSupportsISAReported & isaBit) == 0) { if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0)) ((Compiler*)this)->opts.compSupportsISAExactly |= isaBit; ((Compiler*)this)->opts.compSupportsISAReported |= isaBit; } return (opts.compSupportsISAExactly & isaBit) != 0; #else return false; #endif } // Ensure that code will not execute if an instruction set is usable. Call only // if the instruction set has previously reported as unusable, but when // that that status has not yet been recorded to the AOT compiler void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa) { // use compExactlyDependsOn to capture are record the use of the isa bool isaUsable = compExactlyDependsOn(isa); // Assert that the is unusable. If true, this function should never be called. assert(!isaUsable); } // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations? // The result of this api call will match the target machine if the result is true // If the result is false, then the target machine may have support for the instruction bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const { if ((opts.compSupportsISA & (1ULL << isa)) != 0) { return compExactlyDependsOn(isa); } else { return false; } } // Answer the question: Is a particular ISA supported for explicit hardware intrinsics? bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const { // Report intent to use the ISA to the EE compExactlyDependsOn(isa); return ((opts.compSupportsISA & (1ULL << isa)) != 0); } bool canUseVexEncoding() const { #ifdef TARGET_XARCH return compOpportunisticallyDependsOn(InstructionSet_AVX); #else return false; #endif } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XX Generic info about the compilation and the method being compiled. XX XX It is responsible for driving the other phases. XX XX It is also responsible for all the memory management. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: Compiler* InlineeCompiler; // The Compiler instance for the inlinee InlineResult* compInlineResult; // The result of importing the inlinee method. bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE bool compJmpOpUsed; // Does the method do a JMP bool compLongUsed; // Does the method use TYP_LONG bool compFloatingPointUsed; // Does the method use TYP_FLOAT or TYP_DOUBLE bool compTailCallUsed; // Does the method do a tailcall bool compTailPrefixSeen; // Does the method IL have tail. prefix bool compLocallocSeen; // Does the method IL have localloc opcode bool compLocallocUsed; // Does the method use localloc. bool compLocallocOptimized; // Does the method have an optimized localloc bool compQmarkUsed; // Does the method use GT_QMARK/GT_COLON bool compQmarkRationalized; // Is it allowed to use a GT_QMARK/GT_COLON node. bool compHasBackwardJump; // Does the method (or some inlinee) have a lexically backwards jump? 
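    // A hedged usage sketch for the ISA query helpers above (illustrative only, not
    // code from this file): an optimization that merely prefers AVX2 would ask
    //
    //     if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
    //     {
    //         // take the AVX2 path; otherwise fall back silently
    //     }
    //
    // whereas importing an explicit hardware intrinsic would go through
    // compHWIntrinsicDependsOn(InstructionSet_AVX2), which reports the intent to
    // use the ISA to the EE regardless of whether the hardware path is taken.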
bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler? bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set // NOTE: These values are only reliable after // the importing is completely finished. #ifdef DEBUG // State information - which phases have completed? // These are kept together for easy discoverability bool bRangeAllowStress; bool compCodeGenDone; int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done? size_t compSizeEstimate; // The estimated size of the method as per `gtSetEvalOrder`. size_t compCycleEstimate; // The estimated cycle count of the method as per `gtSetEvalOrder` #endif // DEBUG bool fgLocalVarLivenessDone; // Note that this one is used outside of debug. bool fgLocalVarLivenessChanged; bool compLSRADone; bool compRationalIRForm; bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method. bool compGeneratingProlog; bool compGeneratingEpilog; bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack. // Insert cookie on frame and code to check the cookie, like VC++ -GS. bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local // copies of susceptible parameters to avoid buffer overrun attacks through locals/params bool getNeedsGSSecurityCookie() const { return compNeedsGSSecurityCookie; } void setNeedsGSSecurityCookie() { compNeedsGSSecurityCookie = true; } FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During // frame layout calculations, this is the level we are currently // computing. //---------------------------- JITing options ----------------------------- enum codeOptimize { BLENDED_CODE, SMALL_CODE, FAST_CODE, COUNT_OPT_CODE }; struct Options { JitFlags* jitFlags; // all flags passed from the EE // The instruction sets that the compiler is allowed to emit. uint64_t compSupportsISA; // The instruction sets that were reported to the VM as being used by the current method. Subset of // compSupportsISA. uint64_t compSupportsISAReported; // The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations. // Subset of compSupportsISA. // The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only // used via explicit hardware intrinsics. uint64_t compSupportsISAExactly; void setSupportedISAs(CORINFO_InstructionSetFlags isas) { compSupportsISA = isas.GetFlagsRaw(); } unsigned compFlags; // method attributes unsigned instrCount; unsigned lvRefCount; codeOptimize compCodeOpt; // what type of code optimizations bool compUseCMOV; // optimize maximally and/or favor speed over size? 
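    // The DEFAULT_MIN_OPTS_* thresholds below are the knobs consulted when deciding
    // whether a method is too large to be worth fully optimizing and should drop to
    // MinOpts instead. A hedged sketch of that style of check (the real decision
    // logic lives elsewhere in the compiler, not in this header):
    //
    //     if ((info.compILCodeSize > DEFAULT_MIN_OPTS_CODE_SIZE) ||
    //         (opts.instrCount > DEFAULT_MIN_OPTS_INSTR_COUNT))
    //     {
    //         opts.SetMinOpts(true);
    //     }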
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000 #define DEFAULT_MIN_OPTS_INSTR_COUNT 20000 #define DEFAULT_MIN_OPTS_BB_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000 #define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000 // Maximun number of locals before turning off the inlining #define MAX_LV_NUM_COUNT_FOR_INLINING 512 bool compMinOpts; bool compMinOptsIsSet; #ifdef DEBUG mutable bool compMinOptsIsUsed; bool MinOpts() const { assert(compMinOptsIsSet); compMinOptsIsUsed = true; return compMinOpts; } bool IsMinOptsSet() const { return compMinOptsIsSet; } #else // !DEBUG bool MinOpts() const { return compMinOpts; } bool IsMinOptsSet() const { return compMinOptsIsSet; } #endif // !DEBUG bool OptimizationDisabled() const { return MinOpts() || compDbgCode; } bool OptimizationEnabled() const { return !OptimizationDisabled(); } void SetMinOpts(bool val) { assert(!compMinOptsIsUsed); assert(!compMinOptsIsSet || (compMinOpts == val)); compMinOpts = val; compMinOptsIsSet = true; } // true if the CLFLG_* for an optimization is set. bool OptEnabled(unsigned optFlag) const { return !!(compFlags & optFlag); } #ifdef FEATURE_READYTORUN bool IsReadyToRun() const { return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN); } #else bool IsReadyToRun() const { return false; } #endif // Check if the compilation is control-flow guard enabled. bool IsCFGEnabled() const { #if defined(TARGET_ARM64) || defined(TARGET_AMD64) // On these platforms we assume the register that the target is // passed in is preserved by the validator and take care to get the // target from the register for the call (even in debug mode). static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0); if (JitConfig.JitForceControlFlowGuard()) return true; return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG); #else // The remaining platforms are not supported and would require some // work to support. // // ARM32: // The ARM32 validator does not preserve any volatile registers // which means we have to take special care to allocate and use a // callee-saved register (reloading the target from memory is a // security issue). // // x86: // On x86 some VSD calls disassemble the call site and expect an // indirect call which is fundamentally incompatible with CFG. // This would require a different way to pass this information // through. // return false; #endif } #ifdef FEATURE_ON_STACK_REPLACEMENT bool IsOSR() const { return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR); } #else bool IsOSR() const { return false; } #endif // true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating // PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as // the current logic for frame setup initializes and pushes // the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot // safely be pushed/popped while the thread is in a preemptive state.). bool ShouldUsePInvokeHelpers() { return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) || jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE); } // true if we should use insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method // prolog/epilog bool IsReversePInvoke() { return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE); } bool compScopeInfo; // Generate the LocalVar info ? bool compDbgCode; // Generate debugger-friendly code? bool compDbgInfo; // Gather debugging info? 
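    // Hedged note (illustrative, not code from this file): phases typically guard
    // optional work with the accessors defined above rather than reading
    // compMinOpts directly, e.g.
    //
    //     if (opts.OptimizationEnabled())
    //     {
    //         // run the optimization phase
    //     }
    //
    // which also folds in compDbgCode, since OptimizationDisabled() is
    // MinOpts() || compDbgCode.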
bool compDbgEnC; #ifdef PROFILING_SUPPORTED bool compNoPInvokeInlineCB; #else static const bool compNoPInvokeInlineCB; #endif #ifdef DEBUG bool compGcChecks; // Check arguments and return values to ensure they are sane #endif #if defined(DEBUG) && defined(TARGET_XARCH) bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct. #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86. #endif // defined(DEBUG) && defined(TARGET_X86) bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen #ifdef DEBUG #if defined(TARGET_XARCH) bool compEnablePCRelAddr; // Whether an absolute addr can be encoded as a PC-rel offset by RyuJIT where possible #endif #endif // DEBUG #ifdef UNIX_AMD64_ABI // This flag indicates whether there is a need to align the frame. // On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for // FastTailCall. These slots make the frame size non-zero, so the alignment logic will be called. // On AMD64-Unix, there are no such slots. There is a possibility to have calls in the method with frame size of // 0. The frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case by remembering that // there are calls and making sure the frame alignment logic is executed. bool compNeedToAlignFrame; #endif // UNIX_AMD64_ABI bool compProcedureSplitting; // Separate cold code from hot code bool genFPorder; // Preserve FP order (operations are non-commutative) bool genFPopt; // Can we do frame-pointer-omission optimization? bool altJit; // True if we are an altjit and are compiling this method #ifdef OPT_CONFIG bool optRepeat; // Repeat optimizer phases k times #endif #ifdef DEBUG bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH bool dspCode; // Display native code generated bool dspEHTable; // Display the EH table reported to the VM bool dspDebugInfo; // Display the Debug info reported to the VM bool dspInstrs; // Display the IL instructions intermixed with the native code output bool dspLines; // Display source-code lines intermixed with native code output bool dmpHex; // Display raw bytes in hex of native code output bool varNames; // Display variable names in native code output bool disAsm; // Display native code as it is generated bool disAsmSpilled; // Display native code when any register spilling occurs bool disasmWithGC; // Display GC info interleaved with disassembly. bool disDiffable; // Makes the Disassembly code 'diff-able' bool disAddr; // Display process address next to each instruction in disassembly code bool disAlignment; // Display alignment boundaries in disassembly code bool disAsm2; // Display native code after it is generated using external disassembler bool dspOrder; // Display names of each of the methods that we ngen/jit bool dspUnwind; // Display the unwind info output bool dspDiffable; // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable) bool compLongAddress; // Force using large pseudo instructions for long address // (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC) bool dspGCtbls; // Display the GC tables #endif bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method // Default numbers used to perform loop alignment. All the numbers are chosen // based on experimenting with various benchmarks.
// Default minimum loop block weight required to enable loop alignment. #define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4 // By default a loop will be aligned at 32B address boundary to get better // performance as per architecture manuals. #define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20 // For non-adaptive loop alignment, by default, only align a loop whose size is // at most 3 times the alignment block size. If the loop is bigger than that, it is most // likely complicated enough that loop alignment will not impact performance. #define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3 #ifdef DEBUG // Loop alignment variables // If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary. bool compJitAlignLoopForJcc; #endif // For non-adaptive alignment, minimum loop size (in bytes) for which alignment will be done. unsigned short compJitAlignLoopMaxCodeSize; // Minimum weight needed for the first block of a loop to make it a candidate for alignment. unsigned short compJitAlignLoopMinBlockWeight; // For non-adaptive alignment, address boundary (power of 2) at which loop alignment should // be done. By default, 32B. unsigned short compJitAlignLoopBoundary; // Padding limit to align a loop. unsigned short compJitAlignPaddingLimit; // If set, perform adaptive loop alignment that limits number of padding based on loop size. bool compJitAlignLoopAdaptive; // If set, tries to hide alignment instructions behind unconditional jumps. bool compJitHideAlignBehindJmp; #ifdef LATE_DISASM bool doLateDisasm; // Run the late disassembler #endif // LATE_DISASM #if DUMP_GC_TABLES && !defined(DEBUG) #pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!") static const bool dspGCtbls = true; #endif #ifdef PROFILING_SUPPORTED // Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()). // This option helps make the JIT behave as if it is running under a profiler. bool compJitELTHookEnabled; #endif // PROFILING_SUPPORTED #if FEATURE_TAILCALL_OPT // Whether opportunistic or implicit tail call optimization is enabled. bool compTailCallOpt; // Whether optimization of transforming a recursive tail call into a loop is enabled. bool compTailCallLoopOpt; #endif #if FEATURE_FASTTAILCALL // Whether fast tail calls are allowed. bool compFastTailCalls; #endif // FEATURE_FASTTAILCALL #if defined(TARGET_ARM64) // Decision about whether to save FP/LR registers with callee-saved registers (see // COMPlus_JitSaveFpLrWithCalleSavedRegisters). int compJitSaveFpLrWithCalleeSavedRegisters; #endif // defined(TARGET_ARM64) #ifdef CONFIGURABLE_ARM_ABI bool compUseSoftFP = false; #else #ifdef ARM_SOFTFP static const bool compUseSoftFP = true; #else // !ARM_SOFTFP static const bool compUseSoftFP = false; #endif // ARM_SOFTFP #endif // CONFIGURABLE_ARM_ABI } opts; static bool s_pAltJitExcludeAssembliesListInitialized; static AssemblyNamesList2* s_pAltJitExcludeAssembliesList; #ifdef DEBUG static bool s_pJitDisasmIncludeAssembliesListInitialized; static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList; static bool s_pJitFunctionFileInitialized; static MethodSet* s_pJitMethodSet; #endif // DEBUG #ifdef DEBUG // silence warning of cast to greater size. It is easier to silence than construct code the compiler is happy with, and // it is safe in this case #pragma warning(push) #pragma warning(disable : 4312) template <typename T> T dspPtr(T p) { return (p == ZERO) ? ZERO : (opts.dspDiffable ? 
T(0xD1FFAB1E) : p); } template <typename T> T dspOffset(T o) { return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o); } #pragma warning(pop) static int dspTreeID(GenTree* tree) { return tree->gtTreeID; } static void printStmtID(Statement* stmt) { assert(stmt != nullptr); printf(FMT_STMT, stmt->GetID()); } static void printTreeID(GenTree* tree) { if (tree == nullptr) { printf("[------]"); } else { printf("[%06d]", dspTreeID(tree)); } } const char* pgoSourceToString(ICorJitInfo::PgoSource p); const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail); #endif // DEBUG // clang-format off #define STRESS_MODES \ \ STRESS_MODE(NONE) \ \ /* "Variations" stress areas which we try to mix up with each other. */ \ /* These should not be exhaustively used as they might */ \ /* hide/trivialize other areas */ \ \ STRESS_MODE(REGS) \ STRESS_MODE(DBL_ALN) \ STRESS_MODE(LCL_FLDS) \ STRESS_MODE(UNROLL_LOOPS) \ STRESS_MODE(MAKE_CSE) \ STRESS_MODE(LEGACY_INLINE) \ STRESS_MODE(CLONE_EXPR) \ STRESS_MODE(USE_CMOV) \ STRESS_MODE(FOLD) \ STRESS_MODE(MERGED_RETURNS) \ STRESS_MODE(BB_PROFILE) \ STRESS_MODE(OPT_BOOLS_GC) \ STRESS_MODE(REMORPH_TREES) \ STRESS_MODE(64RSLT_MUL) \ STRESS_MODE(DO_WHILE_LOOPS) \ STRESS_MODE(MIN_OPTS) \ STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \ STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \ STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \ STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \ STRESS_MODE(UNSAFE_BUFFER_CHECKS) \ STRESS_MODE(NULL_OBJECT_CHECK) \ STRESS_MODE(PINVOKE_RESTORE_ESP) \ STRESS_MODE(RANDOM_INLINE) \ STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \ STRESS_MODE(GENERIC_VARN) \ STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \ STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \ STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \ STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \ \ /* After COUNT_VARN, stress level 2 does all of these all the time */ \ \ STRESS_MODE(COUNT_VARN) \ \ /* "Check" stress areas that can be exhaustively used if we */ \ /* dont care about performance at all */ \ \ STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \ STRESS_MODE(CHK_FLOW_UPDATE) \ STRESS_MODE(EMITTER) \ STRESS_MODE(CHK_REIMPORT) \ STRESS_MODE(FLATFP) \ STRESS_MODE(GENERIC_CHECK) \ STRESS_MODE(COUNT) enum compStressArea { #define STRESS_MODE(mode) STRESS_##mode, STRESS_MODES #undef STRESS_MODE }; // clang-format on #ifdef DEBUG static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1]; BYTE compActiveStressModes[STRESS_COUNT]; #endif // DEBUG #define MAX_STRESS_WEIGHT 100 bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); #ifdef DEBUG bool compInlineStress() { return compStressCompile(STRESS_LEGACY_INLINE, 50); } bool compRandomInlineStress() { return compStressCompile(STRESS_RANDOM_INLINE, 50); } bool compPromoteFewerStructs(unsigned lclNum); #endif // DEBUG bool compTailCallStress() { #ifdef DEBUG // Do not stress tailcalls in IL stubs as the runtime creates several IL // stubs to implement the tailcall mechanism, which would then // recursively create more IL stubs. 
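        // Added summary comment: in short, tail call stress is never applied to IL stubs, and is
        // otherwise enabled either explicitly via the TailcallStress config switch or pseudo-randomly
        // for a small fraction of methods via compStressCompile(STRESS_TAILCALL, 5) below.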
return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && (JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5)); #else return false; #endif } const char* compGetTieringName(bool wantShortName = false) const; const char* compGetStressMessage() const; codeOptimize compCodeOpt() const { #if 0 // Switching between size & speed has measurable throughput impact // (3.5% on NGen CoreLib when measured). It used to be enabled for // DEBUG, but should generate identical code between CHK & RET builds, // so that's not acceptable. // TODO-Throughput: Figure out what to do about size vs. speed & throughput. // Investigate the cause of the throughput regression. return opts.compCodeOpt; #else return BLENDED_CODE; #endif } //--------------------- Info about the procedure -------------------------- struct Info { COMP_HANDLE compCompHnd; CORINFO_MODULE_HANDLE compScopeHnd; CORINFO_CLASS_HANDLE compClassHnd; CORINFO_METHOD_HANDLE compMethodHnd; CORINFO_METHOD_INFO* compMethodInfo; bool hasCircularClassConstraints; bool hasCircularMethodConstraints; #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS const char* compMethodName; const char* compClassName; const char* compFullName; double compPerfScore; int compMethodSuperPMIIndex; // useful when debugging under SuperPMI #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) // Method hash is logically const, but computed // on first demand. mutable unsigned compMethodHashPrivate; unsigned compMethodHash() const; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef PSEUDORANDOM_NOP_INSERTION // things for pseudorandom nop insertion unsigned compChecksum; CLRRandom compRNG; #endif // The following holds the FLG_xxxx flags for the method we're compiling. unsigned compFlags; // The following holds the class attributes for the method we're compiling. unsigned compClassAttr; const BYTE* compCode; IL_OFFSET compILCodeSize; // The IL code size IL_OFFSET compILImportSize; // Estimated amount of IL actually imported IL_OFFSET compILEntry; // The IL entry point (normally 0) PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr) UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if: // (1) the code is not hot/cold split, and we issued less code than we expected, or // (2) the code is hot/cold split, and we issued less code than we expected // in the cold section (the hot section will always be padded out to compTotalHotCodeSize). bool compIsStatic : 1; // Is the method static (no 'this' pointer)? bool compIsVarArgs : 1; // Does the method have varargs parameters? bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used. var_types compRetType; // Return type of the method as declared in IL var_types compRetNativeType; // Normalized return type as per target arch ABI unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden) unsigned compArgsCount; // Number of arguments (incl. 
implicit and hidden) #if FEATURE_FASTTAILCALL unsigned compArgStackSize; // Incoming argument stack size in bytes #endif // FEATURE_FASTTAILCALL unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present); int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE) unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var) unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden) unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden) unsigned compMaxStack; UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition. CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method. unsigned compLvFrameListRoot; // lclNum for the Frame root unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL. // You should generally use compHndBBtabCount instead: it is the // current number of EH clauses (after additions like synchronized // methods and funclets, and removals like unreachable code deletion). Target::ArgOrder compArgOrder; bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler // and the VM expects that, or the JIT is a "self-host" compiler // (e.g., x86 hosted targeting x86) and the VM expects that. /* The following holds IL scope information about local variables. */ unsigned compVarScopesCount; VarScopeDsc* compVarScopes; /* The following holds information about instr offsets for * which we need to report IP-mappings */ IL_OFFSET* compStmtOffsets; // sorted unsigned compStmtOffsetsCount; ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit; #define CPU_X86 0x0100 // The generic X86 CPU #define CPU_X86_PENTIUM_4 0x0110 #define CPU_X64 0x0200 // The generic x64 CPU #define CPU_AMD_X64 0x0210 // AMD x64 CPU #define CPU_INTEL_X64 0x0240 // Intel x64 CPU #define CPU_ARM 0x0300 // The generic ARM CPU #define CPU_ARM64 0x0400 // The generic ARM64 CPU unsigned genCPU; // What CPU are we running on // Number of class profile probes in this method unsigned compClassProbeCount; } info; // Returns true if the method being compiled returns a non-void and non-struct value. // Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a // single register as per target arch ABI (e.g on Amd64 Windows structs of size 1, 2, // 4 or 8 gets normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; On Arm HFA structs). // Methods returning such structs are considered to return non-struct return value and // this method returns true in that case. bool compMethodReturnsNativeScalarType() { return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType); } // Returns true if the method being compiled returns RetBuf addr as its return value bool compMethodReturnsRetBufAddr() { // There are cases where implicit RetBuf argument should be explicitly returned in a register. // In such cases the return type is changed to TYP_BYREF and appropriate IR is generated. // These cases are: CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 // 1. on x64 Windows and Unix the address of RetBuf needs to be returned by // methods with hidden RetBufArg in RAX. 
In such case GT_RETURN is of TYP_BYREF, // returning the address of RetBuf. return (info.compRetBuffArg != BAD_VAR_NUM); #else // TARGET_AMD64 #ifdef PROFILING_SUPPORTED // 2. Profiler Leave callback expects the address of retbuf as return value for // methods with hidden RetBuf argument. impReturnInstruction() when profiler // callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for // methods with hidden RetBufArg. if (compIsProfilerHookNeeded()) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif // 3. Windows ARM64 native instance calling convention requires the address of RetBuff // to be returned in x0. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { auto callConv = info.compCallConv; if (callConvIsInstanceMethodCallConv(callConv)) { return (info.compRetBuffArg != BAD_VAR_NUM); } } #endif // TARGET_ARM64 // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (info.compCallConv != CorInfoCallConvExtension::Managed) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif return false; #endif // TARGET_AMD64 } // Returns true if the method returns a value in more than one return register // TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs? // TODO-ARM64: Does this apply for ARM64 too? bool compMethodReturnsMultiRegRetType() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. // Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } bool compEnregLocals() { return ((opts.compFlags & CLFLG_REGVAR) != 0); } bool compEnregStructLocals() { return (JitConfig.JitEnregStructLocals() != 0); } bool compObjectStackAllocation() { return (JitConfig.JitObjectStackAllocation() != 0); } // Returns true if the method returns a value in more than one return register, // it should replace/be merged with compMethodReturnsMultiRegRetType when #36868 is fixed. // The difference from original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling, // this method correctly returns false for it (it is passed as HVA), when the original returns true. bool compMethodReturnsMultiRegRegTypeAlternate() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 #if defined(TARGET_ARM64) // TYP_SIMD* are returned in one register. if (varTypeIsSIMD(info.compRetNativeType)) { return false; } #endif // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. 
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } // Returns true if the method being compiled returns a value bool compMethodHasRetVal() { return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() || compMethodReturnsMultiRegRetType(); } // Returns true if the method requires a PInvoke prolog and epilog bool compMethodRequiresPInvokeFrame() { return (info.compUnmanagedCallCountWithGCTransition > 0); } // Returns true if address-exposed user variables should be poisoned with a recognizable value bool compShouldPoisonFrame() { #ifdef FEATURE_ON_STACK_REPLACEMENT if (opts.IsOSR()) return false; #endif return !info.compInitMem && opts.compDbgCode; } // Returns true if the jit supports having patchpoints in this method. // Optionally, get the reason why not. bool compCanHavePatchpoints(const char** reason = nullptr); #if defined(DEBUG) void compDispLocalVars(); #endif // DEBUG private: class ClassLayoutTable* m_classLayoutTable; class ClassLayoutTable* typCreateClassLayoutTable(); class ClassLayoutTable* typGetClassLayoutTable(); public: // Get the layout having the specified layout number. ClassLayout* typGetLayoutByNum(unsigned layoutNum); // Get the layout number of the specified layout. unsigned typGetLayoutNum(ClassLayout* layout); // Get the layout having the specified size but no class handle. ClassLayout* typGetBlkLayout(unsigned blockSize); // Get the number of a layout having the specified size but no class handle. unsigned typGetBlkLayoutNum(unsigned blockSize); // Get the layout for the specified class handle. ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle); // Get the number of a layout for the specified class handle. unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle); //-------------------------- Global Compiler Data ------------------------------------ #ifdef DEBUG private: static LONG s_compMethodsCount; // to produce unique label names #endif public: #ifdef DEBUG LONG compMethodID; unsigned compGenTreeID; unsigned compStatementID; unsigned compBasicBlockID; #endif BasicBlock* compCurBB; // the current basic block in process Statement* compCurStmt; // the current statement in process GenTree* compCurTree; // the current tree in process // The following is used to create the 'method JIT info' block. 
size_t compInfoBlkSize; BYTE* compInfoBlkAddr; EHblkDsc* compHndBBtab; // array of EH data unsigned compHndBBtabCount; // element count of used elements in EH data array unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array #if defined(TARGET_X86) //------------------------------------------------------------------------- // Tracking of region covered by the monitor in synchronized methods void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT #endif // !TARGET_X86 Phases mostRecentlyActivePhase; // the most recently active phase PhaseChecks activePhaseChecks; // the currently active phase checks //------------------------------------------------------------------------- // The following keeps track of how many bytes of local frame space we've // grabbed so far in the current function, and how many argument bytes we // need to pop when we return. // unsigned compLclFrameSize; // secObject+lclBlk+locals+temps // Count of callee-saved regs we pushed in the prolog. // Does not include EBP for isFramePointerUsed() and double-aligned frames. // In case of Amd64 this doesn't include float regs saved on stack. unsigned compCalleeRegsPushed; #if defined(TARGET_XARCH) // Mask of callee saved float regs on stack. regMaskTP compCalleeFPRegsSavedMask; #endif #ifdef TARGET_AMD64 // Quirk for VS debug-launch scenario to work: // Bytes of padding between save-reg area and locals. #define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES) unsigned compVSQuirkStackPaddingNeeded; #endif unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg)) unsigned compMapILargNum(unsigned ILargNum); // map accounting for hidden args unsigned compMapILvarNum(unsigned ILvarNum); // map accounting for hidden args unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args #if defined(TARGET_ARM64) struct FrameInfo { // Frame type (1-5) int frameType; // Distance from established (method body) SP to base of callee save area int calleeSaveSpOffset; // Amount to subtract from SP before saving (prolog) OR // to add to SP after restoring (epilog) callee saves int calleeSaveSpDelta; // Distance from established SP to where caller's FP was saved int offsetSpToSavedFp; } compFrameInfo; #endif //------------------------------------------------------------------------- static void compStartup(); // One-time initialization static void compShutdown(); // One-time finalization void compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo); void compDone(); static void compDisplayStaticSizes(FILE* fout); //------------ Some utility functions -------------- void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection); /* OUT */ // Several JIT/EE interface functions return a CorInfoType, and also return a // class handle as an out parameter if the type is a value class. Returns the // size of the type these describe. unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd); // Returns true if the method being compiled has a return buffer. bool compHasRetBuffArg(); #ifdef DEBUG // Components used by the compiler may write unit test suites, and // have them run within this method. They will be run only once per process, and only // in debug. (Perhaps should be under the control of a COMPlus_ flag.) 
// These should fail by asserting. void compDoComponentUnitTestsOnce(); #endif // DEBUG int compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); void compCompileFinish(); int compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlag); ArenaAllocator* compGetArenaAllocator(); void generatePatchpointInfo(); #if MEASURE_MEM_ALLOC static bool s_dspMemStats; // Display per-phase memory statistics for every function #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS unsigned m_loopsConsidered; bool m_curLoopHasHoistedExpression; unsigned m_loopsWithHoistedExpressions; unsigned m_totalHoistedExpressions; void AddLoopHoistStats(); void PrintPerMethodLoopHoistStats(); static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below. static unsigned s_loopsConsidered; static unsigned s_loopsWithHoistedExpressions; static unsigned s_totalHoistedExpressions; static void PrintAggregateLoopHoistStats(FILE* f); #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS class EnregisterStats { private: unsigned m_totalNumberOfVars; unsigned m_totalNumberOfStructVars; unsigned m_totalNumberOfEnregVars; unsigned m_totalNumberOfStructEnregVars; unsigned m_addrExposed; unsigned m_VMNeedsStackAddr; unsigned m_localField; unsigned m_blockOp; unsigned m_dontEnregStructs; unsigned m_notRegSizeStruct; unsigned m_structArg; unsigned m_lclAddrNode; unsigned m_castTakesAddr; unsigned m_storeBlkSrc; unsigned m_oneAsgRetyping; unsigned m_swizzleArg; unsigned m_blockOpRet; unsigned m_returnSpCheck; unsigned m_simdUserForcesDep; unsigned m_liveInOutHndlr; unsigned m_depField; unsigned m_noRegVars; unsigned m_minOptsGC; #ifdef JIT32_GCENCODER unsigned m_PinningRef; #endif // JIT32_GCENCODER #if !defined(TARGET_64BIT) unsigned m_longParamField; #endif // !TARGET_64BIT unsigned m_parentExposed; unsigned m_tooConservative; unsigned m_escapeAddress; unsigned m_osrExposed; unsigned m_stressLclFld; unsigned m_copyFldByFld; unsigned m_dispatchRetBuf; unsigned m_wideIndir; public: void RecordLocal(const LclVarDsc* varDsc); void Dump(FILE* fout) const; }; static EnregisterStats s_enregisterStats; #endif // TRACK_ENREG_STATS bool compIsForImportOnly(); bool compIsForInlining() const; bool compDonotInline(); #ifdef DEBUG // Get the default fill char value we randomize this value when JitStress is enabled. 
static unsigned char compGetJitDefaultFill(Compiler* comp); const char* compLocalVarName(unsigned varNum, unsigned offs); VarName compVarName(regNumber reg, bool isFloatReg = false); const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false); const char* compRegNameForSize(regNumber reg, size_t size); const char* compFPregVarName(unsigned fpReg, bool displayVar = false); void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP); void compDspSrcLinesByLineNum(unsigned line, bool seek = false); #endif // DEBUG //------------------------------------------------------------------------- struct VarScopeListNode { VarScopeDsc* data; VarScopeListNode* next; static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc) { VarScopeListNode* node = new (alloc) VarScopeListNode; node->data = value; node->next = nullptr; return node; } }; struct VarScopeMapInfo { VarScopeListNode* head; VarScopeListNode* tail; static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc) { VarScopeMapInfo* info = new (alloc) VarScopeMapInfo; info->head = node; info->tail = node; return info; } }; // Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup. static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap; // Map to keep variables' scope indexed by varNum containing it's scope dscs at the index. VarNumToScopeDscMap* compVarScopeMap; VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd); VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs); VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs); void compInitVarScopeMap(); VarScopeDsc** compEnterScopeList; // List has the offsets where variables // enter scope, sorted by instr offset unsigned compNextEnterScope; VarScopeDsc** compExitScopeList; // List has the offsets where variables // go out of scope, sorted by instr offset unsigned compNextExitScope; void compInitScopeLists(); void compResetScopeLists(); VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false); VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false); void compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)); #ifdef DEBUG void compDispScopeLists(); #endif // DEBUG bool compIsProfilerHookNeeded(); //------------------------------------------------------------------------- /* Statistical Data Gathering */ void compJitStats(); // call this function and enable // various ifdef's below for statistical data #if CALL_ARG_STATS void compCallArgStats(); static void compDispCallArgStats(FILE* fout); #endif //------------------------------------------------------------------------- protected: #ifdef DEBUG bool skipMethod(); #endif ArenaAllocator* compArenaAllocator; public: void compFunctionTraceStart(); void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI); protected: size_t compMaxUncheckedOffsetForNullObject; void compInitOptions(JitFlags* compileFlags); void compSetProcessor(); void compInitDebuggingInfo(); void compSetOptimizationLevel(); #ifdef TARGET_ARMARCH bool compRsvdRegCheck(FrameLayoutState curState); #endif void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); // Clear annotations produced during optimizations; 
to be used between iterations when repeating opts. void ResetOptAnnotations(); // Regenerate loop descriptors; to be used between iterations when repeating opts. void RecomputeLoopInfo(); #ifdef PROFILING_SUPPORTED // Data required for generating profiler Enter/Leave/TailCall hooks bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method void* compProfilerMethHnd; // Profiler handle of the method being compiled. Passed as param to ELT callbacks bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle #endif public: // Assumes called as part of process shutdown; does any compiler-specific work associated with that. static void ProcessShutdownWork(ICorStaticInfo* statInfo); CompAllocator getAllocator(CompMemKind cmk = CMK_Generic) { return CompAllocator(compArenaAllocator, cmk); } CompAllocator getAllocatorGC() { return getAllocator(CMK_GC); } CompAllocator getAllocatorLoopHoist() { return getAllocator(CMK_LoopHoist); } #ifdef DEBUG CompAllocator getAllocatorDebugOnly() { return getAllocator(CMK_DebugOnly); } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX typeInfo XX XX XX XX Checks for type compatibility and merges types XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Returns true if child is equal to or a subtype of parent for merge purposes // This support is necessary to suport attributes that are not described in // for example, signatures. For example, the permanent home byref (byref that // points to the gc heap), isn't a property of method signatures, therefore, // it is safe to have mismatches here (that tiCompatibleWith will not flag), // but when deciding if we need to reimport a block, we need to take these // in account bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Returns true if child is equal to or a subtype of parent. // normalisedForStack indicates that both types are normalised for the stack bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Merges pDest and pSrc. Returns false if merge is undefined. // *pDest is modified to represent the merged type. Sets "*changed" to true // if this changes "*pDest". bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX IL verification stuff XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // The following is used to track liveness of local variables, initialization // of valueclass constructors, and type safe use of IL instructions. // dynamic state info needed for verification EntryState verCurrentState; // this ptr of object type .ctors are considered intited only after // the base class ctor is called, or an alternate ctor is called. // An uninited this ptr can be used to access fields, but cannot // be used to call a member function. 
    bool verTrackObjCtorInitState;

    void verInitBBEntryState(BasicBlock* block, EntryState* currentState);

    // Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state.
    void verSetThisInit(BasicBlock* block, ThisInitState tis);
    void verInitCurrentState();
    void verResetCurrentState(BasicBlock* block, EntryState* currentState);

    // Merges the current verification state into the entry state of "block"; returns false if that merge fails,
    // true if it succeeds. Further sets "*changed" to true if this changes the entry state of "block".
    bool verMergeEntryStates(BasicBlock* block, bool* changed);

    void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg));
    void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg));
    typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd,
                             bool bashStructToRef = false); // converts from jit type representation to typeInfo
    typeInfo verMakeTypeInfo(CorInfoType ciType,
                             CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo
    bool verIsSDArray(const typeInfo& ti);
    typeInfo verGetArrayElemType(const typeInfo& ti);
    typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args);
    bool verIsByRefLike(const typeInfo& ti);
    bool verIsSafeToReturnByRef(const typeInfo& ti);

    // generic type variables range over types that satisfy IsBoxable
    bool verIsBoxable(const typeInfo& ti);

    void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file)
                                                       DEBUGARG(unsigned line));
    void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file)
                                             DEBUGARG(unsigned line));
    bool verCheckTailCallConstraint(OPCODE opcode,
                                    CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                    CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call
                                                                                       // on a type parameter?
                                    bool speculative // If true, won't throw if verification fails. Instead it will
                                                     // return false to the caller.
                                                     // If false, it will throw.
                                    );
    bool verIsBoxedValueType(const typeInfo& ti);

    void verVerifyCall(OPCODE opcode,
                       CORINFO_RESOLVED_TOKEN* pResolvedToken,
                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                       bool tailCall,
                       bool readonlyCall, // is this a "readonly." call?
                       const BYTE* delegateCreateStart,
                       const BYTE* codeAddr,
                       CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName));

    bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef);

    typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType);
    typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType);
    void verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                        const CORINFO_FIELD_INFO& fieldInfo,
                        const typeInfo* tiThis,
                        bool mutator,
                        bool allowPlainStructAsThis = false);
    void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode);
    void verVerifyThisPtrInitialised();
    bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target);

#ifdef DEBUG

    // One line log function. Default level is 0. Increasing it gives you
    // more log information.

    // levels are currently unused: #define JITDUMP(level,...)
(); void JitLogEE(unsigned level, const char* fmt, ...); bool compDebugBreak; bool compJitHaltMethod(); #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GS Security checks for unsafe buffers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: struct ShadowParamVarInfo { FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other unsigned shadowCopy; // Lcl var num, if not valid set to BAD_VAR_NUM static bool mayNeedShadowCopy(LclVarDsc* varDsc) { #if defined(TARGET_AMD64) // GS cookie logic to create shadow slots, create trees to copy reg args to shadow // slots and update all trees to refer to shadow slots is done immediately after // fgMorph(). Lsra could potentially mark a param as DoNotEnregister after JIT determines // not to shadow a parameter. Also, LSRA could potentially spill a param which is passed // in register. Therefore, conservatively all params may need a shadow copy. Note that // GS cookie logic further checks whether the param is a ptr or an unsafe buffer before // creating a shadow slot even though this routine returns true. // // TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than // required. There are two cases under which a reg arg could potentially be used from its // home location: // a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates()) // b) LSRA spills it // // Possible solution to address case (a) // - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked // in this routine. Note that live out of exception handler is something we may not be // able to do it here since GS cookie logic is invoked ahead of liveness computation. // Therefore, for methods with exception handling and need GS cookie check we might have // to take conservative approach. // // Possible solution to address case (b) // - Whenver a parameter passed in an argument register needs to be spilled by LSRA, we // create a new spill temp if the method needs GS cookie check. return varDsc->lvIsParam; #else // !defined(TARGET_AMD64) return varDsc->lvIsParam && !varDsc->lvIsRegArg; #endif } #ifdef DEBUG void Print() { printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy); } #endif }; GSCookie* gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL ShadowParamVarInfo* gsShadowVarInfo; // Table used by shadow param analysis code void gsGSChecksInitCookie(); // Grabs cookie variable void gsCopyShadowParams(); // Identify vulnerable params and create dhadow copies bool gsFindVulnerableParams(); // Shadow param analysis code void gsParamsToShadows(); // Insert copy code and replave param uses by shadow static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk #define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined. // This can be overwritten by setting complus_JITInlineSize env variable. 
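// Illustrative example (added): with the default limit of 100, a candidate callee whose IL body is
// 120 bytes is rejected by this size limit unless the limit is raised via the config setting
// mentioned above.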
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined #define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers private: #ifdef FEATURE_JIT_METHOD_PERF JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation. static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run. static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD. static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to. #endif void BeginPhase(Phases phase); // Indicate the start of the given phase. void EndPhase(Phases phase); // Indicate the end of the given phase. #if MEASURE_CLRAPI_CALLS // Thin wrappers that call into JitTimer (if present). inline void CLRApiCallEnter(unsigned apix); inline void CLRApiCallLeave(unsigned apix); public: inline void CLR_API_Enter(API_ICorJitInfo_Names ename); inline void CLR_API_Leave(API_ICorJitInfo_Names ename); private: #endif #if defined(DEBUG) || defined(INLINE_DATA) // These variables are associated with maintaining SQM data about compile time. unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase // in the current compilation. unsigned __int64 m_compCycles; // Net cycle count for current compilation DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of // the inlining phase in the current compilation. #endif // defined(DEBUG) || defined(INLINE_DATA) // Records the SQM-relevant (cycles and tick count). Should be called after inlining is complete. // (We do this after inlining because this marks the last point at which the JIT is likely to cause // type-loading and class initialization). void RecordStateAtEndOfInlining(); // Assumes being called at the end of compilation. Update the SQM state. void RecordStateAtEndOfCompilation(); public: #if FUNC_INFO_LOGGING static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the // filename to write it to. static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to. #endif // FUNC_INFO_LOGGING Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers. #if MEASURE_NOWAY void RecordNowayAssert(const char* filename, unsigned line, const char* condStr); #endif // MEASURE_NOWAY #ifndef FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(); #else // FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(const char* filename, unsigned line); // Telemetry instance to use per method compilation. JitTelemetry compJitTelemetry; // Get common parameters that have to be logged with most telemetry data. void compGetTelemetryDefaults(const char** assemblyName, const char** scopeName, const char** methodName, unsigned* methodHash); #endif // !FEATURE_TRACELOGGING #ifdef DEBUG private: NodeToTestDataMap* m_nodeTestData; static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000; unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we // label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS. // Current kept in this. 
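    // Added note (inferred from the surrounding declarations): the per-node test data kept below is
    // the store that the JitTestCheck* routines declared further down (JitTestCheckSSA, JitTestCheckVN)
    // consult to verify that the conditions implied by test annotations actually hold.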
public: NodeToTestDataMap* GetNodeTestData() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_nodeTestData == nullptr) { compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly()); } return compRoot->m_nodeTestData; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap; // Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and // currently occur in the AST graph. NodeToIntMap* FindReachableNodesInNodeTestData(); // Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated // test data, associate that data with "to". void TransferTestDataToNode(GenTree* from, GenTree* to); // These are the methods that test that the various conditions implied by the // test attributes are satisfied. void JitTestCheckSSA(); // SSA builder tests. void JitTestCheckVN(); // Value numbering tests. #endif // DEBUG // The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for // operations. FieldSeqStore* m_fieldSeqStore; FieldSeqStore* GetFieldSeqStore() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_fieldSeqStore == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_FieldSeqStore)); compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc); } return compRoot->m_fieldSeqStore; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap; // Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since // the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant // that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to // attach the field sequence directly to the address node. NodeToFieldSeqMap* m_zeroOffsetFieldMap; NodeToFieldSeqMap* GetZeroOffsetFieldMap() { // Don't need to worry about inlining here if (m_zeroOffsetFieldMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for // allocation. CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap)); m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc); } return m_zeroOffsetFieldMap; } // Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in // "fieldSeq", whose offsets are required all to be zero. Ensures that any field sequence annotation currently on // "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has // a field sequence as a member; otherwise, it may be the addition of an a byref and a constant, where the const // has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we // record the the field sequence using the ZeroOffsetFieldMap described above. // // One exception above is that "op1" is a node of type "TYP_REF" where "op1" is a GT_LCL_VAR. // This happens when System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in // CoreRT. Such case is handled same as the default case. 
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq); typedef JitHashTable<const GenTree*, JitPtrKeyFuncs<GenTree>, ArrayInfo> NodeToArrayInfoMap; NodeToArrayInfoMap* m_arrayInfoMap; NodeToArrayInfoMap* GetArrayInfoMap() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_arrayInfoMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc); } return compRoot->m_arrayInfoMap; } //----------------------------------------------------------------------------------------------------------------- // Compiler::TryGetArrayInfo: // Given an indirection node, checks to see whether or not that indirection represents an array access, and // if so returns information about the array. // // Arguments: // indir - The `GT_IND` node. // arrayInfo (out) - Information about the accessed array if this function returns true. Undefined otherwise. // // Returns: // True if the `GT_IND` node represents an array access; false otherwise. bool TryGetArrayInfo(GenTreeIndir* indir, ArrayInfo* arrayInfo) { if ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0) { return false; } if (indir->gtOp1->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = indir->gtOp1->AsIndexAddr(); *arrayInfo = ArrayInfo(indexAddr->gtElemType, indexAddr->gtElemSize, indexAddr->gtElemOffset, indexAddr->gtStructElemClass); return true; } bool found = GetArrayInfoMap()->Lookup(indir, arrayInfo); assert(found); return true; } NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount]; // In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory // states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory // state, all the possible memory states are possible initial states of the corresponding catch block(s).) NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind) { if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates) { // Use the same map for GCHeap and ByrefExposed when their states match. memoryKind = ByrefExposed; } assert(memoryKind < MemoryKindCount); Compiler* compRoot = impInlineRoot(); if (compRoot->m_memorySsaMap[memoryKind] == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc); } return compRoot->m_memorySsaMap[memoryKind]; } // The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields. 
CORINFO_CLASS_HANDLE m_refAnyClass; CORINFO_FIELD_HANDLE GetRefanyDataField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 0); } CORINFO_FIELD_HANDLE GetRefanyTypeField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 1); } #if VARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_varsetOpCounter; #endif #if ALLVARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter; #endif static HelperCallProperties s_helperCallProperties; #ifdef UNIX_AMD64_ABI static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size); static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum); static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); #endif // defined(UNIX_AMD64_ABI) void fgMorphMultiregStructArgs(GenTreeCall* call); GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr); bool killGCRefs(GenTree* tree); }; // end of class Compiler //--------------------------------------------------------------------------------------------------------------------- // GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern. // // This class implements a configurable walker for IR trees. There are five configuration options (defaults values are // shown in parentheses): // // - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit // of a misnomer, as the first entry will always be the current node. // // - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an // argument before visiting the node's operands. // // - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an // argument after visiting the node's operands. // // - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes. // `DoPreOrder` must be true if this option is true. // // - UseExecutionOrder (false): when true, then walker will visit a node's operands in execution order (e.g. if a // binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be // visited before the first). // // At least one of `DoPreOrder` and `DoPostOrder` must be specified. 
// // A simple pre-order visitor might look something like the following: // // class CountingVisitor final : public GenTreeVisitor<CountingVisitor> // { // public: // enum // { // DoPreOrder = true // }; // // unsigned m_count; // // CountingVisitor(Compiler* compiler) // : GenTreeVisitor<CountingVisitor>(compiler), m_count(0) // { // } // // Compiler::fgWalkResult PreOrderVisit(GenTree* node) // { // m_count++; // } // }; // // This visitor would then be used like so: // // CountingVisitor countingVisitor(compiler); // countingVisitor.WalkTree(root); // template <typename TVisitor> class GenTreeVisitor { protected: typedef Compiler::fgWalkResult fgWalkResult; enum { ComputeStack = false, DoPreOrder = false, DoPostOrder = false, DoLclVarsOnly = false, UseExecutionOrder = false, }; Compiler* m_compiler; ArrayStack<GenTree*> m_ancestors; GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack)) { assert(compiler != nullptr); static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder); static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder); } fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } public: fgWalkResult WalkTree(GenTree** use, GenTree* user) { assert(use != nullptr); GenTree* node = *use; if (TVisitor::ComputeStack) { m_ancestors.Push(node); } fgWalkResult result = fgWalkResult::WALK_CONTINUE; if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } node = *use; if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES)) { goto DONE; } } switch (node->OperGet()) { // Leaf lclVars case GT_LCL_VAR: case GT_LCL_FLD: case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Leaf nodes case GT_CATCH_ARG: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_MEMORYBARRIER: case GT_JMP: case GT_JCC: case GT_SETCC: case GT_NO_OP: case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: #if !defined(FEATURE_EH_FUNCLETS) case GT_END_LFIN: #endif // !FEATURE_EH_FUNCLETS case GT_PHI_ARG: case GT_JMPTABLE: case GT_CLS_VAR: case GT_CLS_VAR_ADDR: case GT_ARGPLACE: case GT_PHYSREG: case GT_EMITNOP: case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: break; // Lclvar unary operators case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Standard unary operators case GT_NOT: case GT_NEG: case GT_BSWAP: case GT_BSWAP16: case GT_COPY: case GT_RELOAD: case GT_ARR_LENGTH: case GT_CAST: case GT_BITCAST: case GT_CKFINITE: case GT_LCLHEAP: case GT_ADDR: case GT_IND: case GT_OBJ: case GT_BLK: case GT_BOX: case GT_ALLOCOBJ: case GT_INIT_VAL: case GT_JTRUE: case GT_SWITCH: case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: case GT_PUTARG_TYPE: case GT_RETURNTRAP: case GT_NOP: case GT_FIELD: case GT_RETURN: case GT_RETFILT: case GT_RUNTIMELOOKUP: case GT_KEEPALIVE: case GT_INC_SATURATE: { GenTreeUnOp* const unOp = 
node->AsUnOp(); if (unOp->gtOp1 != nullptr) { result = WalkTree(&unOp->gtOp1, unOp); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } // Special nodes case GT_PHI: for (GenTreePhi::Use& use : node->AsPhi()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_FIELD_LIST: for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_CMPXCHG: { GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg(); result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpValue, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_ARR_ELEM: { GenTreeArrElem* const arrElem = node->AsArrElem(); result = WalkTree(&arrElem->gtArrObj, arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } const unsigned rank = arrElem->gtArrRank; for (unsigned dim = 0; dim < rank; dim++) { result = WalkTree(&arrElem->gtArrInds[dim], arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } case GT_ARR_OFFSET: { GenTreeArrOffs* const arrOffs = node->AsArrOffs(); result = WalkTree(&arrOffs->gtOffset, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtIndex, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtArrObj, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_STORE_DYN_BLK: { GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk(); GenTree** op1Use = &dynBlock->gtOp1; GenTree** op2Use = &dynBlock->gtOp2; GenTree** op3Use = &dynBlock->gtDynamicSize; result = WalkTree(op1Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op2Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op3Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_CALL: { GenTreeCall* const call = node->AsCall(); if (call->gtCallThisArg != nullptr) { result = WalkTree(&call->gtCallThisArg->NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->Args()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->LateArgs()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtCallType == CT_INDIRECT) { if (call->gtCallCookie != nullptr) { result = WalkTree(&call->gtCallCookie, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } result = WalkTree(&call->gtCallAddr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtControlExpr != nullptr) { result = WalkTree(&call->gtControlExpr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { assert(node->AsMultiOp()->GetOperandCount() == 2); result = WalkTree(&node->AsMultiOp()->Op(2), node); if (result == 
fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&node->AsMultiOp()->Op(1), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } else { for (GenTree** use : node->AsMultiOp()->UseEdges()) { result = WalkTree(use, node); if (result == fgWalkResult::WALK_ABORT) { return result; } } } break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) // Binary nodes default: { assert(node->OperIsBinary()); GenTreeOp* const op = node->AsOp(); GenTree** op1Use = &op->gtOp1; GenTree** op2Use = &op->gtOp2; if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { std::swap(op1Use, op2Use); } if (*op1Use != nullptr) { result = WalkTree(op1Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (*op2Use != nullptr) { result = WalkTree(op2Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } } DONE: // Finally, visit the current node if (TVisitor::DoPostOrder) { result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user); } if (TVisitor::ComputeStack) { m_ancestors.Pop(); } return result; } }; template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder> class GenericTreeWalker final : public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>> { public: enum { ComputeStack = computeStack, DoPreOrder = doPreOrder, DoPostOrder = doPostOrder, DoLclVarsOnly = doLclVarsOnly, UseExecutionOrder = useExecutionOrder, }; private: Compiler::fgWalkData* m_walkData; public: GenericTreeWalker(Compiler::fgWalkData* walkData) : GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>( walkData->compiler) , m_walkData(walkData) { assert(walkData != nullptr); if (computeStack) { walkData->parentStack = &this->m_ancestors; } } Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtprVisitorFn(use, m_walkData); } Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtpoVisitorFn(use, m_walkData); } }; // A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor. template <typename TVisitor> class DomTreeVisitor { protected: Compiler* const m_compiler; DomTreeNode* const m_domTree; DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree) { } void Begin() { } void PreOrderVisit(BasicBlock* block) { } void PostOrderVisit(BasicBlock* block) { } void End() { } public: //------------------------------------------------------------------------ // WalkTree: Walk the dominator tree, starting from fgFirstBB. // // Notes: // This performs a non-recursive, non-allocating walk of the tree by using // DomTreeNode's firstChild and nextSibling links to locate the children of // a node and BasicBlock's bbIDom parent link to go back up the tree when // no more children are left. // // Forests are also supported, provided that all the roots are chained via // DomTreeNode::nextSibling to fgFirstBB. 
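    //
    // Example (an illustrative sketch added for documentation; "compiler" and "domTree" stand for
    // whatever Compiler instance and dominator tree array the caller already has at hand):
    //
    //    class BlockCounterVisitor final : public DomTreeVisitor<BlockCounterVisitor>
    //    {
    //    public:
    //        unsigned m_count;
    //
    //        BlockCounterVisitor(Compiler* compiler, DomTreeNode* domTree)
    //            : DomTreeVisitor<BlockCounterVisitor>(compiler, domTree), m_count(0)
    //        {
    //        }
    //
    //        void PreOrderVisit(BasicBlock* block)
    //        {
    //            m_count++;
    //        }
    //    };
    //
    //    BlockCounterVisitor visitor(compiler, domTree);
    //    visitor.WalkTree();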
// void WalkTree() { static_cast<TVisitor*>(this)->Begin(); for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next) { static_cast<TVisitor*>(this)->PreOrderVisit(block); next = m_domTree[block->bbNum].firstChild; if (next != nullptr) { assert(next->bbIDom == block); continue; } do { static_cast<TVisitor*>(this)->PostOrderVisit(block); next = m_domTree[block->bbNum].nextSibling; if (next != nullptr) { assert(next->bbIDom == block->bbIDom); break; } block = block->bbIDom; } while (block != nullptr); } static_cast<TVisitor*>(this)->End(); } }; // EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.: // for (EHblkDsc* const ehDsc : EHClauses(compiler)) // class EHClauses { EHblkDsc* m_begin; EHblkDsc* m_end; // Forward iterator for the exception handling table entries. Iteration is in table order. // class iterator { EHblkDsc* m_ehDsc; public: iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc) { } EHblkDsc* operator*() const { return m_ehDsc; } iterator& operator++() { ++m_ehDsc; return *this; } bool operator!=(const iterator& i) const { return m_ehDsc != i.m_ehDsc; } }; public: EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount) { assert((m_begin != nullptr) || (m_begin == m_end)); } iterator begin() const { return iterator(m_begin); } iterator end() const { return iterator(m_end); } }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Miscellaneous Compiler stuff XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Values used to mark the types a stack slot is used for const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer const unsigned TYPE_REF_STC = 0x40; // slot used as a struct const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type // const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES extern size_t grossVMsize; extern size_t grossNCsize; extern size_t totalNCsize; extern unsigned genMethodICnt; extern unsigned genMethodNCnt; extern size_t gcHeaderISize; extern size_t gcPtrMapISize; extern size_t gcHeaderNSize; extern size_t gcPtrMapNSize; #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of basic block counts (more data on 1 BB methods) */ #if COUNT_BASIC_BLOCKS extern Histogram bbCntTable; extern Histogram bbOneBBSizeTable; #endif /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
exit conditions * - number of loops that have an iterator (for like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent extern unsigned totalLoopCount; // counts the total number of natural loops extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like) extern unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < // const) extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like) extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops extern unsigned loopsThisMethod; // counts the number of loops in the current method extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. extern Histogram loopCountTable; // Histogram of loop counts extern Histogram loopExitCountTable; // Histogram of loop exit counts #endif // COUNT_LOOPS /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE extern size_t genFlowNodeSize; extern size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE #if MEASURE_NODE_SIZE struct NodeSizeStats { void Init() { genTreeNodeCnt = 0; genTreeNodeSize = 0; genTreeNodeActualSize = 0; } // Count of tree nodes allocated. unsigned __int64 genTreeNodeCnt; // The size we allocate. unsigned __int64 genTreeNodeSize; // The actual size of the node. Note that the actual size will likely be smaller // than the allocated size, but we sometimes use SetOper()/ChangeOper() to change // a smaller node to a larger one. TODO-Cleanup: add stats on // SetOper()/ChangeOper() usage to quantify this. unsigned __int64 genTreeNodeActualSize; }; extern NodeSizeStats genNodeSizeStats; // Total node size stats extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats extern Histogram genTreeNcntHist; extern Histogram genTreeNsizHist; #endif // MEASURE_NODE_SIZE /***************************************************************************** * Count fatal errors (including noway_asserts). 
*/ #if MEASURE_FATAL extern unsigned fatal_badCode; extern unsigned fatal_noWay; extern unsigned fatal_implLimitation; extern unsigned fatal_NOMEM; extern unsigned fatal_noWayAssertBody; #ifdef DEBUG extern unsigned fatal_noWayAssertBodyArgs; #endif // DEBUG extern unsigned fatal_NYI; #endif // MEASURE_FATAL /***************************************************************************** * Codegen */ #ifdef TARGET_XARCH const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar; const instruction INS_AND = INS_and; const instruction INS_OR = INS_or; const instruction INS_XOR = INS_xor; const instruction INS_NEG = INS_neg; const instruction INS_TEST = INS_test; const instruction INS_MUL = INS_imul; const instruction INS_SIGNED_DIVIDE = INS_idiv; const instruction INS_UNSIGNED_DIVIDE = INS_div; const instruction INS_BREAKPOINT = INS_int3; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbb; const instruction INS_NOT = INS_not; #endif // TARGET_XARCH #ifdef TARGET_ARM const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr; const instruction INS_AND = INS_and; const instruction INS_OR = INS_orr; const instruction INS_XOR = INS_eor; const instruction INS_NEG = INS_rsb; const instruction INS_TEST = INS_tst; const instruction INS_MUL = INS_mul; const instruction INS_MULADD = INS_mla; const instruction INS_SIGNED_DIVIDE = INS_sdiv; const instruction INS_UNSIGNED_DIVIDE = INS_udiv; const instruction INS_BREAKPOINT = INS_bkpt; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbc; const instruction INS_NOT = INS_mvn; const instruction INS_ABS = INS_vabs; const instruction INS_SQRT = INS_vsqrt; #endif // TARGET_ARM #ifdef TARGET_ARM64 const instruction INS_MULADD = INS_madd; inline const instruction INS_BREAKPOINT_osHelper() { // GDB needs the encoding of brk #0 // Windbg needs the encoding of brk #F000 return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows; } #define INS_BREAKPOINT INS_BREAKPOINT_osHelper() const instruction INS_ABS = INS_fabs; const instruction INS_SQRT = INS_fsqrt; #endif // TARGET_ARM64 /*****************************************************************************/ extern const BYTE genTypeSizes[]; extern const BYTE genTypeAlignments[]; extern const BYTE genTypeStSzs[]; extern const BYTE genActualTypes[]; /*****************************************************************************/ #ifdef DEBUG void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars); #endif // DEBUG #include "compiler.hpp" // All the shared inline functions /*****************************************************************************/ #endif //_COMPILER_H_ /*****************************************************************************/
1
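// ---------------------------------------------------------------------------
// A minimal, self-contained sketch of the curiously-recurring-template pattern
// used by GenTreeVisitor/DomTreeVisitor in the header above: the base class
// dispatches to the derived class's callbacks through a static_cast, so the
// per-node hooks are resolved statically. All names below (Node,
// TreeVisitorBase, PrintingVisitor) are hypothetical and are not part of the
// JIT sources; this is an illustration, not the actual implementation.
// ---------------------------------------------------------------------------
#include <cstdio>

struct Node
{
    int   value;
    Node* left;
    Node* right;
};

template <typename TVisitor>
struct TreeVisitorBase
{
    // Default (empty) callbacks; a derived visitor shadows the ones it needs.
    void PreOrderVisit(Node*) {}
    void PostOrderVisit(Node*) {}

    void WalkTree(Node* n)
    {
        if (n == nullptr)
        {
            return;
        }
        static_cast<TVisitor*>(this)->PreOrderVisit(n);  // visit before children
        WalkTree(n->left);
        WalkTree(n->right);
        static_cast<TVisitor*>(this)->PostOrderVisit(n); // visit after children
    }
};

struct PrintingVisitor : TreeVisitorBase<PrintingVisitor>
{
    void PreOrderVisit(Node* n)  { std::printf("pre  %d\n", n->value); }
    void PostOrderVisit(Node* n) { std::printf("post %d\n", n->value); }
};

// Usage sketch:
//   Node b{2, nullptr, nullptr}, c{3, nullptr, nullptr}, a{1, &b, &c};
//   PrintingVisitor v;
//   v.WalkTree(&a);   // prints pre 1, pre 2, post 2, pre 3, post 3, post 1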
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/jit/fginline.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif // Flowgraph Inline Support /*****************************************************************************/ //------------------------------------------------------------------------ // fgCheckForInlineDepthAndRecursion: compute depth of the candidate, and // check for recursion. // // Return Value: // The depth of the inline candidate. The root method is a depth 0, top-level // candidates at depth 1, etc. // // Notes: // We generally disallow recursive inlines by policy. However, they are // supported by the underlying machinery. // // Likewise the depth limit is a policy consideration, and serves mostly // as a safeguard to prevent runaway inlining of small methods. // unsigned Compiler::fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo) { BYTE* candidateCode = inlineInfo->inlineCandidateInfo->methInfo.ILCode; InlineContext* inlineContext = inlineInfo->inlineCandidateInfo->inlinersContext; InlineResult* inlineResult = inlineInfo->inlineResult; // There should be a context for all candidates. assert(inlineContext != nullptr); int depth = 0; for (; inlineContext != nullptr; inlineContext = inlineContext->GetParent()) { assert(inlineContext->GetCode() != nullptr); depth++; if (inlineContext->GetCode() == candidateCode) { // This inline candidate has the same IL code buffer as an already // inlined method does. inlineResult->NoteFatal(InlineObservation::CALLSITE_IS_RECURSIVE); // No need to note CALLSITE_DEPTH we're already rejecting this candidate return depth; } if (depth > InlineStrategy::IMPLEMENTATION_MAX_INLINE_DEPTH) { break; } } inlineResult->NoteInt(InlineObservation::CALLSITE_DEPTH, depth); return depth; } //------------------------------------------------------------------------ // fgInline - expand inline candidates // // Returns: // phase status indicating if anything was modified // // Notes: // Inline candidates are identified during importation and candidate calls // must be top-level expressions. In input IR, the result of the call (if any) // is consumed elsewhere by a GT_RET_EXPR node. // // For successful inlines, calls are replaced by a sequence of argument setup // instructions, the inlined method body, and return value cleanup. Note // Inlining may introduce new inline candidates. These are processed in a // depth-first fashion, as the inliner walks the IR in statement order. // // After inline expansion in a statement, the statement tree // is walked to locate GT_RET_EXPR nodes. These are replaced by either // * the original call tree, if the inline failed // * the return value tree from the inlinee, if the inline succeeded // // This replacement happens in preorder; on the postorder side of the same // tree walk, we look for opportunties to devirtualize or optimize now that // we know the context for the newly supplied return value tree. // // Inline arguments may be directly substituted into the body of the inlinee // in some cases. See impInlineFetchArg. 
// PhaseStatus Compiler::fgInline() { if (!opts.OptEnabled(CLFLG_INLINING)) { return PhaseStatus::MODIFIED_NOTHING; } #ifdef DEBUG fgPrintInlinedMethods = JitConfig.JitPrintInlinedMethods().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args); #endif // DEBUG noway_assert(fgFirstBB != nullptr); BasicBlock* block = fgFirstBB; bool madeChanges = false; do { // Make the current basic block address available globally compCurBB = block; for (Statement* const stmt : block->Statements()) { #if defined(DEBUG) || defined(INLINE_DATA) // In debug builds we want the inline tree to show all failed // inlines. Some inlines may fail very early and never make it to // candidate stage. So scan the tree looking for those early failures. fgWalkTreePre(stmt->GetRootNodePointer(), fgFindNonInlineCandidate, stmt); #endif GenTree* expr = stmt->GetRootNode(); // The importer ensures that all inline candidates are // statement expressions. So see if we have a call. if (expr->IsCall()) { GenTreeCall* call = expr->AsCall(); // We do. Is it an inline candidate? // // Note we also process GuardeDevirtualizationCandidates here as we've // split off GT_RET_EXPRs for them even when they are not inline candidates // as we need similar processing to ensure they get patched back to where // they belong. if (call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate()) { InlineResult inlineResult(this, call, stmt, "fgInline"); fgMorphStmt = stmt; fgMorphCallInline(call, &inlineResult); // If there's a candidate to process, we will make changes madeChanges = true; // fgMorphCallInline may have updated the // statement expression to a GT_NOP if the // call returned a value, regardless of // whether the inline succeeded or failed. // // If so, remove the GT_NOP and continue // on with the next statement. if (stmt->GetRootNode()->IsNothingNode()) { fgRemoveStmt(block, stmt); continue; } } } // See if we need to replace some return value place holders. // Also, see if this replacement enables further devirtualization. // // Note we have both preorder and postorder callbacks here. // // The preorder callback is responsible for replacing GT_RET_EXPRs // with the appropriate expansion (call or inline result). // Replacement may introduce subtrees with GT_RET_EXPR and so // we rely on the preorder to recursively process those as well. // // On the way back up, the postorder callback then re-examines nodes for // possible further optimization, as the (now complete) GT_RET_EXPR // replacement may have enabled optimizations by providing more // specific types for trees or variables. fgWalkTree(stmt->GetRootNodePointer(), fgUpdateInlineReturnExpressionPlaceHolder, fgLateDevirtualization, (void*)&madeChanges); // See if stmt is of the form GT_COMMA(call, nop) // If yes, we can get rid of GT_COMMA. if (expr->OperGet() == GT_COMMA && expr->AsOp()->gtOp1->OperGet() == GT_CALL && expr->AsOp()->gtOp2->OperGet() == GT_NOP) { madeChanges = true; stmt->SetRootNode(expr->AsOp()->gtOp1); } } block = block->bbNext; } while (block); #ifdef DEBUG // Check that we should not have any inline candidate or return value place holder left. 
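// Re-walk every statement in every block and assert (via fgDebugCheckInlineCandidates) that no
// GTF_CALL_INLINE_CANDIDATE call and no GT_RET_EXPR placeholder survived the expansion above.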
block = fgFirstBB; noway_assert(block); do { for (Statement* const stmt : block->Statements()) { // Call Compiler::fgDebugCheckInlineCandidates on each node fgWalkTreePre(stmt->GetRootNodePointer(), fgDebugCheckInlineCandidates); } block = block->bbNext; } while (block); fgVerifyHandlerTab(); if (verbose || fgPrintInlinedMethods) { JITDUMP("**************** Inline Tree"); printf("\n"); m_inlineStrategy->Dump(verbose || JitConfig.JitPrintInlinedMethodsVerbose()); } #endif // DEBUG return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING; } #if defined(DEBUG) || defined(INLINE_DATA) //------------------------------------------------------------------------ // fgFindNonInlineCandidate: tree walk helper to ensure that a tree node // that is not an inline candidate is noted as a failed inline. // // Arguments: // pTree - pointer to pointer tree node being walked // data - contextual data for the walk // // Return Value: // walk result // // Note: // Invokes fgNoteNonInlineCandidate on the nodes it finds. Compiler::fgWalkResult Compiler::fgFindNonInlineCandidate(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; if (tree->gtOper == GT_CALL) { Compiler* compiler = data->compiler; Statement* stmt = (Statement*)data->pCallbackData; GenTreeCall* call = tree->AsCall(); compiler->fgNoteNonInlineCandidate(stmt, call); } return WALK_CONTINUE; } //------------------------------------------------------------------------ // fgNoteNonInlineCandidate: account for inlining failures in calls // not marked as inline candidates. // // Arguments: // stmt - statement containing the call // call - the call itself // // Notes: // Used in debug only to try and place descriptions of inline failures // into the proper context in the inline tree. void Compiler::fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call) { if (call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate()) { return; } InlineResult inlineResult(this, call, nullptr, "fgNoteNonInlineCandidate"); InlineObservation currentObservation = InlineObservation::CALLSITE_NOT_CANDIDATE; // Try and recover the reason left behind when the jit decided // this call was not a candidate. InlineObservation priorObservation = call->gtInlineObservation; if (InlIsValidObservation(priorObservation)) { currentObservation = priorObservation; } // Propagate the prior failure observation to this result. inlineResult.NotePriorFailure(currentObservation); inlineResult.SetReported(); if (call->gtCallType == CT_USER_FUNC) { m_inlineStrategy->NewContext(call->gtInlineContext, stmt, call)->SetFailed(&inlineResult); } } #endif #if FEATURE_MULTIREG_RET /********************************************************************************* * * tree - The node which needs to be converted to a struct pointer. * * Return the pointer by either __replacing__ the tree node with a suitable pointer * type or __without replacing__ and just returning a subtree or by __modifying__ * a subtree. */ GenTree* Compiler::fgGetStructAsStructPtr(GenTree* tree) { noway_assert(tree->OperIs(GT_LCL_VAR, GT_FIELD, GT_IND, GT_BLK, GT_OBJ, GT_COMMA) || tree->OperIsSIMD() || tree->OperIsHWIntrinsic()); // GT_CALL, cannot get address of call. // GT_MKREFANY, inlining should've been aborted due to mkrefany opcode. 
// GT_RET_EXPR, cannot happen after fgUpdateInlineReturnExpressionPlaceHolder switch (tree->OperGet()) { case GT_BLK: case GT_OBJ: case GT_IND: return tree->AsOp()->gtOp1; case GT_COMMA: tree->AsOp()->gtOp2 = fgGetStructAsStructPtr(tree->AsOp()->gtOp2); tree->gtType = TYP_BYREF; return tree; default: return gtNewOperNode(GT_ADDR, TYP_BYREF, tree); } } /*************************************************************************************************** * child - The inlinee of the retExpr node. * retClsHnd - The struct class handle of the type of the inlinee. * * Assign the inlinee to a tmp, if it is a call, just assign it to a lclVar, else we can * use a copyblock to do the assignment. */ GenTree* Compiler::fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd) { assert(child->gtOper != GT_RET_EXPR && child->gtOper != GT_MKREFANY); unsigned tmpNum = lvaGrabTemp(false DEBUGARG("RetBuf for struct inline return candidates.")); lvaSetStruct(tmpNum, retClsHnd, false); var_types structType = lvaTable[tmpNum].lvType; GenTree* dst = gtNewLclvNode(tmpNum, structType); // If we have a call, we'd like it to be: V00 = call(), but first check if // we have a ", , , call()" -- this is very defensive as we may never get // an inlinee that is made of commas. If the inlinee is not a call, then // we use a copy block to do the assignment. GenTree* src = child; GenTree* lastComma = nullptr; while (src->gtOper == GT_COMMA) { lastComma = src; src = src->AsOp()->gtOp2; } GenTree* newInlinee = nullptr; if (src->gtOper == GT_CALL) { // If inlinee was just a call, new inlinee is v05 = call() newInlinee = gtNewAssignNode(dst, src); // When returning a multi-register value in a local var, make sure the variable is // marked as lvIsMultiRegRet, so it does not get promoted. if (src->AsCall()->HasMultiRegRetVal()) { lvaTable[tmpNum].lvIsMultiRegRet = true; } // If inlinee was comma, but a deeper call, new inlinee is (, , , v05 = call()) if (child->gtOper == GT_COMMA) { lastComma->AsOp()->gtOp2 = newInlinee; newInlinee = child; } } else { // Inlinee is not a call, so just create a copy block to the tmp. src = child; GenTree* dstAddr = fgGetStructAsStructPtr(dst); GenTree* srcAddr = fgGetStructAsStructPtr(src); newInlinee = gtNewCpObjNode(dstAddr, srcAddr, retClsHnd, false); } GenTree* production = gtNewLclvNode(tmpNum, structType); return gtNewOperNode(GT_COMMA, structType, newInlinee, production); } /*************************************************************************************************** * tree - The tree pointer that has one of its child nodes as retExpr. * child - The inlinee child. * retClsHnd - The struct class handle of the type of the inlinee. * * V04 = call() assignments are okay as we codegen it. Everything else needs to be a copy block or * would need a temp. For example, a cast(ldobj) will then be, cast(v05 = ldobj, v05); But it is * a very rare (or impossible) scenario that we'd have a retExpr transform into a ldobj other than * a lclVar/call. So it is not worthwhile to do pattern matching optimizations like addr(ldobj(op1)) * can just be op1. */ void Compiler::fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd) { // We are okay to have: // 1. V02 = call(); // 2. copyBlk(dstAddr, srcAddr); assert(tree->gtOper == GT_ASG); // We have an assignment, we codegen only V05 = call(). 
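// Only the direct LCL_VAR = CALL shape is left untouched below; any other struct-valued inlinee is
// spilled to a temp (fgAssignStructInlineeToVar) and/or turned into a copy block.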
if (child->gtOper == GT_CALL && tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { // If it is a multireg return on x64/ux, the local variable should be marked as lvIsMultiRegRet if (child->AsCall()->HasMultiRegRetVal()) { unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); lvaTable[lclNum].lvIsMultiRegRet = true; } return; } GenTree* dstAddr = fgGetStructAsStructPtr(tree->AsOp()->gtOp1); GenTree* srcAddr = fgGetStructAsStructPtr( (child->gtOper == GT_CALL) ? fgAssignStructInlineeToVar(child, retClsHnd) // Assign to a variable if it is a call. : child); // Just get the address, if not a call. tree->ReplaceWith(gtNewCpObjNode(dstAddr, srcAddr, retClsHnd, false), this); } #endif // FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // fgUpdateInlineReturnExpressionPlaceHolder: callback to replace the // inline return expression placeholder. // // Arguments: // pTree -- pointer to tree to examine for updates // data -- context data for the tree walk // // Returns: // fgWalkResult indicating the walk should continue; that // is we wish to fully explore the tree. // // Notes: // Looks for GT_RET_EXPR nodes that arose from tree splitting done // during importation for inline candidates, and replaces them. // // For successful inlines, substitutes the return value expression // from the inline body for the GT_RET_EXPR. // // For failed inlines, rejoins the original call into the tree from // whence it was split during importation. // // The code doesn't actually know if the corresponding inline // succeeded or not; it relies on the fact that gtInlineCandidate // initially points back at the call and is modified in place to // the inlinee return expression if the inline is successful (see // tail end of fgInsertInlineeBlocks for the update of iciCall). // // If the return type is a struct type and we're on a platform // where structs can be returned in multiple registers, ensure the // call has a suitable parent. // // If the original call type and the substitution type are different // the functions makes necessary updates. It could happen if there was // an implicit conversion in the inlinee body. // Compiler::fgWalkResult Compiler::fgUpdateInlineReturnExpressionPlaceHolder(GenTree** pTree, fgWalkData* data) { // All the operations here and in the corresponding postorder // callback (fgLateDevirtualization) are triggered by GT_CALL or // GT_RET_EXPR trees, and these (should) have the call side // effect flag. // // So bail out for any trees that don't have this flag. GenTree* tree = *pTree; if ((tree->gtFlags & GTF_CALL) == 0) { return WALK_SKIP_SUBTREES; } bool* madeChanges = static_cast<bool*>(data->pCallbackData); Compiler* comp = data->compiler; CORINFO_CLASS_HANDLE retClsHnd = NO_CLASS_HANDLE; while (tree->OperGet() == GT_RET_EXPR) { // We are going to copy the tree from the inlinee, // so record the handle now. // if (varTypeIsStruct(tree)) { retClsHnd = tree->AsRetExpr()->gtRetClsHnd; } // Skip through chains of GT_RET_EXPRs (say from nested inlines) // to the actual tree to use. // // Also we might as well try and fold the return value. // Eg returns of constant bools will have CASTS. // This folding may uncover more GT_RET_EXPRs, so we loop around // until we've got something distinct. 
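// gtRetExprVal below tunnels through any chain of nested GT_RET_EXPRs and also reports the flags of the
// basic block that produced the value, so they can be propagated to the block receiving the expansion.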
// BasicBlockFlags bbFlags = BBF_EMPTY; GenTree* inlineCandidate = tree->gtRetExprVal(&bbFlags); inlineCandidate = comp->gtFoldExpr(inlineCandidate); var_types retType = tree->TypeGet(); #ifdef DEBUG if (comp->verbose) { printf("\nReplacing the return expression placeholder "); printTreeID(tree); printf(" with "); printTreeID(inlineCandidate); printf("\n"); // Dump out the old return expression placeholder it will be overwritten by the ReplaceWith below comp->gtDispTree(tree); } #endif // DEBUG var_types newType = inlineCandidate->TypeGet(); // If we end up swapping type we may need to retype the tree: if (retType != newType) { if ((retType == TYP_BYREF) && (tree->OperGet() == GT_IND)) { // - in an RVA static if we've reinterpreted it as a byref; assert(newType == TYP_I_IMPL); JITDUMP("Updating type of the return GT_IND expression to TYP_BYREF\n"); inlineCandidate->gtType = TYP_BYREF; } else { // - under a call if we changed size of the argument. GenTree* putArgType = comp->fgCheckCallArgUpdate(data->parent, inlineCandidate, retType); if (putArgType != nullptr) { inlineCandidate = putArgType; } } } tree->ReplaceWith(inlineCandidate, comp); *madeChanges = true; comp->compCurBB->bbFlags |= (bbFlags & BBF_SPLIT_GAINED); #ifdef DEBUG if (comp->verbose) { printf("\nInserting the inline return expression\n"); comp->gtDispTree(tree); printf("\n"); } #endif // DEBUG } // If an inline was rejected and the call returns a struct, we may // have deferred some work when importing call for cases where the // struct is returned in register(s). // // See the bail-out clauses in impFixupCallStructReturn for inline // candidates. // // Do the deferred work now. if (retClsHnd != NO_CLASS_HANDLE) { structPassingKind howToReturnStruct; var_types returnType = comp->getReturnTypeForStruct(retClsHnd, CorInfoCallConvExtension::Managed, &howToReturnStruct); GenTree* parent = data->parent; switch (howToReturnStruct) { #if FEATURE_MULTIREG_RET // Is this a type that is returned in multiple registers // or a via a primitve type that is larger than the struct type? // if so we need to force into into a form we accept. // i.e. LclVar = call() case SPK_ByValue: case SPK_ByValueAsHfa: { // See assert below, we only look one level above for an asg parent. if (parent->gtOper == GT_ASG) { // Either lhs is a call V05 = call(); or lhs is addr, and asg becomes a copyBlk. comp->fgAttachStructInlineeToAsg(parent, tree, retClsHnd); } else { // Just assign the inlinee to a variable to keep it simple. tree->ReplaceWith(comp->fgAssignStructInlineeToVar(tree, retClsHnd), comp); } *madeChanges = true; } break; #endif // FEATURE_MULTIREG_RET case SPK_EnclosingType: case SPK_PrimitiveType: // No work needs to be done, the call has struct type and should keep it. break; case SPK_ByReference: // We should have already added the return buffer // when we first imported the call break; default: noway_assert(!"Unexpected struct passing kind"); break; } } #if FEATURE_MULTIREG_RET #if defined(DEBUG) // Make sure we don't have a tree like so: V05 = (, , , retExpr); // Since we only look one level above for the parent for '=' and // do not check if there is a series of COMMAs. See above. // Importer and FlowGraph will not generate such a tree, so just // leaving an assert in here. This can be fixed by looking ahead // when we visit GT_ASG similar to fgAttachStructInlineeToAsg. 
// if (tree->OperGet() == GT_ASG) { GenTree* value = tree->AsOp()->gtOp2; if (value->OperGet() == GT_COMMA) { GenTree* effectiveValue = value->gtEffectiveVal(/*commaOnly*/ true); noway_assert(!varTypeIsStruct(effectiveValue) || (effectiveValue->OperGet() != GT_RET_EXPR) || !comp->IsMultiRegReturnedType(effectiveValue->AsRetExpr()->gtRetClsHnd, CorInfoCallConvExtension::Managed)); } } #endif // defined(DEBUG) #endif // FEATURE_MULTIREG_RET return WALK_CONTINUE; } //------------------------------------------------------------------------ // fgLateDevirtualization: re-examine calls after inlining to see if we // can do more devirtualization // // Arguments: // pTree -- pointer to tree to examine for updates // data -- context data for the tree walk // // Returns: // fgWalkResult indicating the walk should continue; that // is we wish to fully explore the tree. // // Notes: // We used to check this opportunistically in the preorder callback for // calls where the `obj` was fed by a return, but we now re-examine // all calls. // // Late devirtualization (and eventually, perhaps, other type-driven // opts like cast optimization) can happen now because inlining or other // optimizations may have provided more accurate types than we saw when // first importing the trees. // // It would be nice to screen candidate sites based on the likelihood // that something has changed. Otherwise we'll waste some time retrying // an optimization that will just fail again. Compiler::fgWalkResult Compiler::fgLateDevirtualization(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; GenTree* parent = data->parent; Compiler* comp = data->compiler; bool* madeChanges = static_cast<bool*>(data->pCallbackData); // In some (rare) cases the parent node of tree will be smashed to a NOP during // the preorder by fgAttachStructToInlineeArg. // // jit\Methodical\VT\callconv\_il_reljumper3 for x64 linux // // If so, just bail out here. if (tree == nullptr) { assert((parent != nullptr) && parent->OperGet() == GT_NOP); return WALK_CONTINUE; } if (tree->OperGet() == GT_CALL) { GenTreeCall* call = tree->AsCall(); bool tryLateDevirt = call->IsVirtual() && (call->gtCallType == CT_USER_FUNC); #ifdef DEBUG tryLateDevirt = tryLateDevirt && (JitConfig.JitEnableLateDevirtualization() == 1); #endif // DEBUG if (tryLateDevirt) { #ifdef DEBUG if (comp->verbose) { printf("**** Late devirt opportunity\n"); comp->gtDispTree(call); } #endif // DEBUG CORINFO_CONTEXT_HANDLE context = nullptr; CORINFO_METHOD_HANDLE method = call->gtCallMethHnd; unsigned methodFlags = 0; const bool isLateDevirtualization = true; const bool explicitTailCall = call->IsTailPrefixedCall(); if ((call->gtCallMoreFlags & GTF_CALL_M_LATE_DEVIRT) != 0) { context = call->gtLateDevirtualizationInfo->exactContextHnd; call->gtLateDevirtualizationInfo = nullptr; } comp->impDevirtualizeCall(call, nullptr, &method, &methodFlags, &context, nullptr, isLateDevirtualization, explicitTailCall); *madeChanges = true; } } else if (tree->OperGet() == GT_ASG) { // If we're assigning to a ref typed local that has one definition, // we may be able to sharpen the type for the local. 
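// When the LHS is a single-def TYP_REF local, the (possibly more precise) class handle computed from the
// RHS by gtGetClassHandle is recorded via lvaUpdateClass, which can enable the late devirtualization
// above for later calls made through this local.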
GenTree* const effLhs = tree->gtGetOp1()->gtEffectiveVal(); if ((effLhs->OperGet() == GT_LCL_VAR) && (effLhs->TypeGet() == TYP_REF)) { const unsigned lclNum = effLhs->AsLclVarCommon()->GetLclNum(); LclVarDsc* lcl = comp->lvaGetDesc(lclNum); if (lcl->lvSingleDef) { GenTree* rhs = tree->gtGetOp2(); bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE newClass = comp->gtGetClassHandle(rhs, &isExact, &isNonNull); if (newClass != NO_CLASS_HANDLE) { comp->lvaUpdateClass(lclNum, newClass, isExact); *madeChanges = true; } } } // If we created a self-assignment (say because we are sharing return spill temps) // we can remove it. // GenTree* const lhs = tree->gtGetOp1(); GenTree* const rhs = tree->gtGetOp2(); if (lhs->OperIs(GT_LCL_VAR) && GenTree::Compare(lhs, rhs)) { comp->gtUpdateNodeSideEffects(tree); assert((tree->gtFlags & GTF_SIDE_EFFECT) == GTF_ASG); JITDUMP("... removing self-assignment\n"); DISPTREE(tree); tree->gtBashToNOP(); *madeChanges = true; } } else if (tree->OperGet() == GT_JTRUE) { // See if this jtrue is now foldable. BasicBlock* block = comp->compCurBB; GenTree* condTree = tree->AsOp()->gtOp1; assert(tree == block->lastStmt()->GetRootNode()); if (condTree->OperGet() == GT_CNS_INT) { JITDUMP(" ... found foldable jtrue at [%06u] in " FMT_BB "\n", dspTreeID(tree), block->bbNum); noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0)); // We have a constant operand, and should have the all clear to optimize. // Update side effects on the tree, assert there aren't any, and bash to nop. comp->gtUpdateNodeSideEffects(tree); assert((tree->gtFlags & GTF_SIDE_EFFECT) == 0); tree->gtBashToNOP(); *madeChanges = true; BasicBlock* bNotTaken = nullptr; if (condTree->AsIntCon()->gtIconVal != 0) { block->bbJumpKind = BBJ_ALWAYS; bNotTaken = block->bbNext; } else { block->bbJumpKind = BBJ_NONE; bNotTaken = block->bbJumpDest; } comp->fgRemoveRefPred(bNotTaken, block); // If that was the last ref, a subsequent flow-opt pass // will clean up the now-unreachable bNotTaken, and any // other transitively unreachable blocks. if (bNotTaken->bbRefs == 0) { JITDUMP("... it looks like " FMT_BB " is now unreachable!\n", bNotTaken->bbNum); } } } else { const var_types retType = tree->TypeGet(); GenTree* foldedTree = comp->gtFoldExpr(tree); GenTree* putArgType = comp->fgCheckCallArgUpdate(data->parent, foldedTree, retType); if (putArgType != nullptr) { foldedTree = putArgType; } *pTree = foldedTree; *madeChanges = true; } return WALK_CONTINUE; } #ifdef DEBUG /***************************************************************************** * Callback to make sure there is no more GT_RET_EXPR and GTF_CALL_INLINE_CANDIDATE nodes. */ /* static */ Compiler::fgWalkResult Compiler::fgDebugCheckInlineCandidates(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; if (tree->gtOper == GT_CALL) { assert((tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) == 0); } else { assert(tree->gtOper != GT_RET_EXPR); } return WALK_CONTINUE; } #endif // DEBUG void Compiler::fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* inlineResult, InlineContext** createdContext) { noway_assert(call->gtOper == GT_CALL); noway_assert((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0); noway_assert(opts.OptEnabled(CLFLG_INLINING)); // This is the InlineInfo struct representing a method to be inlined. 
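// The iciCall/iciStmt/iciBlock fields filled in below record where the candidate call sits in the
// inliner, so fgInsertInlineeBlocks can splice the inlinee's statements back at exactly that point.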
InlineInfo inlineInfo; memset(&inlineInfo, 0, sizeof(inlineInfo)); CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd; inlineInfo.fncHandle = fncHandle; inlineInfo.iciCall = call; inlineInfo.iciStmt = fgMorphStmt; inlineInfo.iciBlock = compCurBB; inlineInfo.thisDereferencedFirst = false; inlineInfo.retExpr = nullptr; inlineInfo.retBB = nullptr; inlineInfo.retExprClassHnd = nullptr; inlineInfo.retExprClassHndIsExact = false; inlineInfo.inlineResult = inlineResult; #ifdef FEATURE_SIMD inlineInfo.hasSIMDTypeArgLocalOrReturn = false; #endif // FEATURE_SIMD InlineCandidateInfo* inlineCandidateInfo = call->gtInlineCandidateInfo; noway_assert(inlineCandidateInfo); // Store the link to inlineCandidateInfo into inlineInfo inlineInfo.inlineCandidateInfo = inlineCandidateInfo; unsigned inlineDepth = fgCheckInlineDepthAndRecursion(&inlineInfo); if (inlineResult->IsFailure()) { #ifdef DEBUG if (verbose) { printf("Recursive or deep inline recursion detected. Will not expand this INLINECANDIDATE \n"); } #endif // DEBUG return; } // Set the trap to catch all errors (including recoverable ones from the EE) struct Param { Compiler* pThis; GenTree* call; CORINFO_METHOD_HANDLE fncHandle; InlineCandidateInfo* inlineCandidateInfo; InlineInfo* inlineInfo; } param; memset(&param, 0, sizeof(param)); param.pThis = this; param.call = call; param.fncHandle = fncHandle; param.inlineCandidateInfo = inlineCandidateInfo; param.inlineInfo = &inlineInfo; bool success = eeRunWithErrorTrap<Param>( [](Param* pParam) { // Init the local var info of the inlinee pParam->pThis->impInlineInitVars(pParam->inlineInfo); if (pParam->inlineInfo->inlineResult->IsCandidate()) { /* Clear the temp table */ memset(pParam->inlineInfo->lclTmpNum, -1, sizeof(pParam->inlineInfo->lclTmpNum)); // // Prepare the call to jitNativeCode // pParam->inlineInfo->InlinerCompiler = pParam->pThis; if (pParam->pThis->impInlineInfo == nullptr) { pParam->inlineInfo->InlineRoot = pParam->pThis; } else { pParam->inlineInfo->InlineRoot = pParam->pThis->impInlineInfo->InlineRoot; } // The inline context is part of debug info and must be created // before we start creating statements; we lazily create it as // late as possible, which is here. pParam->inlineInfo->inlineContext = pParam->inlineInfo->InlineRoot->m_inlineStrategy ->NewContext(pParam->inlineInfo->inlineCandidateInfo->inlinersContext, pParam->inlineInfo->iciStmt, pParam->inlineInfo->iciCall); pParam->inlineInfo->argCnt = pParam->inlineCandidateInfo->methInfo.args.totalILArgs(); pParam->inlineInfo->tokenLookupContextHandle = pParam->inlineCandidateInfo->exactContextHnd; JITLOG_THIS(pParam->pThis, (LL_INFO100000, "INLINER: inlineInfo.tokenLookupContextHandle for %s set to 0x%p:\n", pParam->pThis->eeGetMethodFullName(pParam->fncHandle), pParam->pThis->dspPtr(pParam->inlineInfo->tokenLookupContextHandle))); JitFlags compileFlagsForInlinee = *pParam->pThis->opts.jitFlags; // The following flags are lost when inlining. // (This is checked in Compiler::compInitOptions().) 
compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBINSTR); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_PROF_ENTERLEAVE); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_DEBUG_EnC); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_REVERSE_PINVOKE); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_TRACK_TRANSITIONS); compileFlagsForInlinee.Set(JitFlags::JIT_FLAG_SKIP_VERIFICATION); #ifdef DEBUG if (pParam->pThis->verbose) { printf("\nInvoking compiler for the inlinee method %s :\n", pParam->pThis->eeGetMethodFullName(pParam->fncHandle)); } #endif // DEBUG int result = jitNativeCode(pParam->fncHandle, pParam->inlineCandidateInfo->methInfo.scope, pParam->pThis->info.compCompHnd, &pParam->inlineCandidateInfo->methInfo, (void**)pParam->inlineInfo, nullptr, &compileFlagsForInlinee, pParam->inlineInfo); if (result != CORJIT_OK) { // If we haven't yet determined why this inline fails, use // a catch-all something bad happened observation. InlineResult* innerInlineResult = pParam->inlineInfo->inlineResult; if (!innerInlineResult->IsFailure()) { innerInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_FAILURE); } } } }, &param); if (!success) { #ifdef DEBUG if (verbose) { printf("\nInlining failed due to an exception during invoking the compiler for the inlinee method %s.\n", eeGetMethodFullName(fncHandle)); } #endif // DEBUG // If we haven't yet determined why this inline fails, use // a catch-all something bad happened observation. if (!inlineResult->IsFailure()) { inlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); } } *createdContext = inlineInfo.inlineContext; if (inlineResult->IsFailure()) { return; } #ifdef DEBUG if (0 && verbose) { printf("\nDone invoking compiler for the inlinee method %s\n", eeGetMethodFullName(fncHandle)); } #endif // DEBUG // If there is non-NULL return, but we haven't set the pInlineInfo->retExpr, // That means we haven't imported any BB that contains CEE_RET opcode. // (This could happen for example for a BBJ_THROW block fall through a BBJ_RETURN block which // causes the BBJ_RETURN block not to be imported at all.) // Fail the inlining attempt if (inlineCandidateInfo->fncRetType != TYP_VOID && inlineInfo.retExpr == nullptr) { #ifdef DEBUG if (verbose) { printf("\nInlining failed because pInlineInfo->retExpr is not set in the inlinee method %s.\n", eeGetMethodFullName(fncHandle)); } #endif // DEBUG inlineResult->NoteFatal(InlineObservation::CALLEE_LACKS_RETURN); return; } // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // The inlining attempt cannot be failed starting from this point. // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // We've successfully obtain the list of inlinee's basic blocks. // Let's insert it to inliner's basic block list. fgInsertInlineeBlocks(&inlineInfo); #ifdef DEBUG if (verbose) { printf("Successfully inlined %s (%d IL bytes) (depth %d) [%s]\n", eeGetMethodFullName(fncHandle), inlineCandidateInfo->methInfo.ILCodeSize, inlineDepth, inlineResult->ReasonString()); } if (verbose) { printf("--------------------------------------------------------------------------------------------\n"); } #endif // DEBUG #if defined(DEBUG) impInlinedCodeSize += inlineCandidateInfo->methInfo.ILCodeSize; #endif // We inlined... inlineResult->NoteSuccess(); } //------------------------------------------------------------------------ // fgInsertInlineeBlocks: incorporate statements for an inline into the // root method. 
// // Arguments: // inlineInfo -- info for the inline // // Notes: // The inlining attempt cannot be failed once this method is called. // // Adds all inlinee statements, plus any glue statements needed // either before or after the inlined call. // // Updates flow graph and assigns weights to inlinee // blocks. Currently does not attempt to read IBC data for the // inlinee. // // Updates relevant root method status flags (eg optMethodFlags) to // include information from the inlinee. // // Marks newly added statements with an appropriate inline context. void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) { GenTreeCall* iciCall = pInlineInfo->iciCall; Statement* iciStmt = pInlineInfo->iciStmt; BasicBlock* iciBlock = pInlineInfo->iciBlock; noway_assert(iciBlock->bbStmtList != nullptr); noway_assert(iciStmt->GetRootNode() != nullptr); assert(iciStmt->GetRootNode() == iciCall); noway_assert(iciCall->gtOper == GT_CALL); #ifdef DEBUG Statement* currentDumpStmt = nullptr; if (verbose) { printf("\n\n----------- Statements (and blocks) added due to the inlining of call "); printTreeID(iciCall); printf(" -----------\n"); } #endif // DEBUG // Mark success. pInlineInfo->inlineContext->SetSucceeded(pInlineInfo); // Prepend statements Statement* stmtAfter = fgInlinePrependStatements(pInlineInfo); #ifdef DEBUG if (verbose) { currentDumpStmt = stmtAfter; printf("\nInlinee method body:"); } #endif // DEBUG BasicBlock* topBlock = iciBlock; BasicBlock* bottomBlock = nullptr; if (InlineeCompiler->fgBBcount == 1) { // When fgBBCount is 1 we will always have a non-NULL fgFirstBB // PREFAST_ASSUME(InlineeCompiler->fgFirstBB != nullptr); // DDB 91389: Don't throw away the (only) inlinee block // when its return type is not BBJ_RETURN. // In other words, we need its BBJ_ to perform the right thing. if (InlineeCompiler->fgFirstBB->bbJumpKind == BBJ_RETURN) { // Inlinee contains just one BB. So just insert its statement list to topBlock. if (InlineeCompiler->fgFirstBB->bbStmtList != nullptr) { stmtAfter = fgInsertStmtListAfter(iciBlock, stmtAfter, InlineeCompiler->fgFirstBB->firstStmt()); } // Copy inlinee bbFlags to caller bbFlags. const BasicBlockFlags inlineeBlockFlags = InlineeCompiler->fgFirstBB->bbFlags; noway_assert((inlineeBlockFlags & BBF_HAS_JMP) == 0); noway_assert((inlineeBlockFlags & BBF_KEEP_BBJ_ALWAYS) == 0); // Todo: we may want to exclude other flags here. iciBlock->bbFlags |= (inlineeBlockFlags & ~BBF_RUN_RARELY); #ifdef DEBUG if (verbose) { noway_assert(currentDumpStmt); if (currentDumpStmt != stmtAfter) { do { currentDumpStmt = currentDumpStmt->GetNextStmt(); printf("\n"); gtDispStmt(currentDumpStmt); printf("\n"); } while (currentDumpStmt != stmtAfter); } } #endif // DEBUG // Append statements to null out gc ref locals, if necessary. fgInlineAppendStatements(pInlineInfo, iciBlock, stmtAfter); goto _Done; } } // // ======= Inserting inlinee's basic blocks =============== // bottomBlock = fgNewBBafter(topBlock->bbJumpKind, topBlock, true); bottomBlock->bbRefs = 1; bottomBlock->bbJumpDest = topBlock->bbJumpDest; bottomBlock->inheritWeight(topBlock); topBlock->bbJumpKind = BBJ_NONE; // Update block flags { const BasicBlockFlags originalFlags = topBlock->bbFlags; noway_assert((originalFlags & BBF_SPLIT_NONEXIST) == 0); topBlock->bbFlags &= ~(BBF_SPLIT_LOST); bottomBlock->bbFlags |= originalFlags & BBF_SPLIT_GAINED; } // Split statements between topBlock and bottomBlock. 
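// Everything up to and including stmtAfter (the last prepended setup statement) stays in topBlock; the
// remainder of the original block moves to bottomBlock. The four cases below are: both halves empty,
// topBlock empty, bottomBlock empty, and a genuine two-way split.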
// First figure out bottomBlock_Begin Statement* bottomBlock_Begin; bottomBlock_Begin = stmtAfter->GetNextStmt(); if (topBlock->bbStmtList == nullptr) { // topBlock is empty before the split. // In this case, both topBlock and bottomBlock should be empty noway_assert(bottomBlock_Begin == nullptr); topBlock->bbStmtList = nullptr; bottomBlock->bbStmtList = nullptr; } else if (topBlock->bbStmtList == bottomBlock_Begin) { noway_assert(bottomBlock_Begin != nullptr); // topBlock contains at least one statement before the split. // And the split is before the first statement. // In this case, topBlock should be empty, and everything else should be moved to the bottomBlock. bottomBlock->bbStmtList = topBlock->bbStmtList; topBlock->bbStmtList = nullptr; } else if (bottomBlock_Begin == nullptr) { noway_assert(topBlock->bbStmtList != nullptr); // topBlock contains at least one statement before the split. // And the split is at the end of the topBlock. // In this case, everything should be kept in the topBlock, and the bottomBlock should be empty bottomBlock->bbStmtList = nullptr; } else { noway_assert(topBlock->bbStmtList != nullptr); noway_assert(bottomBlock_Begin != nullptr); // This is the normal case where both blocks should contain at least one statement. Statement* topBlock_Begin = topBlock->firstStmt(); noway_assert(topBlock_Begin != nullptr); Statement* topBlock_End = bottomBlock_Begin->GetPrevStmt(); noway_assert(topBlock_End != nullptr); Statement* bottomBlock_End = topBlock->lastStmt(); noway_assert(bottomBlock_End != nullptr); // Break the linkage between 2 blocks. topBlock_End->SetNextStmt(nullptr); // Fix up all the pointers. topBlock->bbStmtList = topBlock_Begin; topBlock->bbStmtList->SetPrevStmt(topBlock_End); bottomBlock->bbStmtList = bottomBlock_Begin; bottomBlock->bbStmtList->SetPrevStmt(bottomBlock_End); } // // Set the try and handler index and fix the jump types of inlinee's blocks. // for (BasicBlock* const block : InlineeCompiler->Blocks()) { noway_assert(!block->hasTryIndex()); noway_assert(!block->hasHndIndex()); block->copyEHRegion(iciBlock); block->bbFlags |= iciBlock->bbFlags & BBF_BACKWARD_JUMP; DebugInfo di = iciStmt->GetDebugInfo().GetRoot(); if (di.IsValid()) { block->bbCodeOffs = di.GetLocation().GetOffset(); block->bbCodeOffsEnd = block->bbCodeOffs + 1; // TODO: is code size of 1 some magic number for inlining? } else { block->bbCodeOffs = 0; // TODO: why not BAD_IL_OFFSET? block->bbCodeOffsEnd = 0; block->bbFlags |= BBF_INTERNAL; } if (block->bbJumpKind == BBJ_RETURN) { noway_assert((block->bbFlags & BBF_HAS_JMP) == 0); if (block->bbNext) { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum, bottomBlock->bbNum); block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = bottomBlock; } else { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n", block->bbNum); block->bbJumpKind = BBJ_NONE; } } } // Insert inlinee's blocks into inliner's block list. topBlock->setNext(InlineeCompiler->fgFirstBB); InlineeCompiler->fgLastBB->setNext(bottomBlock); // // Add inlinee's block count to inliner's. // fgBBcount += InlineeCompiler->fgBBcount; // Append statements to null out gc ref locals, if necessary. fgInlineAppendStatements(pInlineInfo, bottomBlock, nullptr); #ifdef DEBUG if (verbose) { fgDispBasicBlocks(InlineeCompiler->fgFirstBB, InlineeCompiler->fgLastBB, true); } #endif // DEBUG _Done: // // At this point, we have successully inserted inlinee's code. 
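// What follows propagates summary state from the inlinee compiler instance into the root compiler
// (flag unions, unmanaged call counts, PGO bookkeeping, GS cookie needs) so later phases see the
// combined method.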
// // // Copy out some flags // compLongUsed |= InlineeCompiler->compLongUsed; compFloatingPointUsed |= InlineeCompiler->compFloatingPointUsed; compLocallocUsed |= InlineeCompiler->compLocallocUsed; compLocallocOptimized |= InlineeCompiler->compLocallocOptimized; compQmarkUsed |= InlineeCompiler->compQmarkUsed; compUnsafeCastUsed |= InlineeCompiler->compUnsafeCastUsed; compGSReorderStackLayout |= InlineeCompiler->compGSReorderStackLayout; compHasBackwardJump |= InlineeCompiler->compHasBackwardJump; lvaGenericsContextInUse |= InlineeCompiler->lvaGenericsContextInUse; #ifdef FEATURE_SIMD if (InlineeCompiler->usesSIMDTypes()) { setUsesSIMDTypes(true); } #endif // FEATURE_SIMD // Update unmanaged call details info.compUnmanagedCallCountWithGCTransition += InlineeCompiler->info.compUnmanagedCallCountWithGCTransition; // Update stats for inlinee PGO // if (InlineeCompiler->fgPgoSchema != nullptr) { fgPgoInlineePgo++; } else if (InlineeCompiler->fgPgoFailReason != nullptr) { // Single block inlinees may not have probes // when we've ensabled minimal profiling (which // is now the default). // if (InlineeCompiler->fgBBcount == 1) { fgPgoInlineeNoPgoSingleBlock++; } else { fgPgoInlineeNoPgo++; } } // Update optMethodFlags CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG unsigned optMethodFlagsBefore = optMethodFlags; #endif optMethodFlags |= InlineeCompiler->optMethodFlags; #ifdef DEBUG if (optMethodFlags != optMethodFlagsBefore) { JITDUMP("INLINER: Updating optMethodFlags -- root:%0x callee:%0x new:%0x\n", optMethodFlagsBefore, InlineeCompiler->optMethodFlags, optMethodFlags); } #endif // If an inlinee needs GS cookie we need to make sure that the cookie will not be allocated at zero stack offset. // Note that if the root method needs GS cookie then this has already been taken care of. if (!getNeedsGSSecurityCookie() && InlineeCompiler->getNeedsGSSecurityCookie()) { setNeedsGSSecurityCookie(); const unsigned dummy = lvaGrabTempWithImplicitUse(false DEBUGARG("GSCookie dummy for inlinee")); LclVarDsc* gsCookieDummy = lvaGetDesc(dummy); gsCookieDummy->lvType = TYP_INT; gsCookieDummy->lvIsTemp = true; // It is not alive at all, set the flag to prevent zero-init. lvaSetVarDoNotEnregister(dummy DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } // If there is non-NULL return, replace the GT_CALL with its return value expression, // so later it will be picked up by the GT_RET_EXPR node. if ((pInlineInfo->inlineCandidateInfo->fncRetType != TYP_VOID) || (iciCall->gtReturnType == TYP_STRUCT)) { noway_assert(pInlineInfo->retExpr); #ifdef DEBUG if (verbose) { printf("\nReturn expression for call at "); printTreeID(iciCall); printf(" is\n"); gtDispTree(pInlineInfo->retExpr); } #endif // DEBUG // Replace the call with the return expression. Note that iciCall won't be part of the IR // but may still be referenced from a GT_RET_EXPR node. We will replace GT_RET_EXPR node // in fgUpdateInlineReturnExpressionPlaceHolder. At that time we will also update the flags // on the basic block of GT_RET_EXPR node. if (iciCall->gtInlineCandidateInfo->retExpr->OperGet() == GT_RET_EXPR) { // Save the basic block flags from the retExpr basic block. 
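// fgUpdateInlineReturnExpressionPlaceHolder later ORs the BBF_SPLIT_GAINED subset of these saved flags
// into whichever block ends up holding the expanded return expression.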
iciCall->gtInlineCandidateInfo->retExpr->AsRetExpr()->bbFlags = pInlineInfo->retBB->bbFlags; } if (bottomBlock != nullptr) { // We've split the iciblock into two and the RET_EXPR was possibly moved to the bottomBlock // so let's update its flags with retBB's ones bottomBlock->bbFlags |= pInlineInfo->retBB->bbFlags & BBF_COMPACT_UPD; } iciCall->ReplaceWith(pInlineInfo->retExpr, this); } // // Detach the GT_CALL node from the original statement by hanging a "nothing" node under it, // so that fgMorphStmts can remove the statement once we return from here. // iciStmt->SetRootNode(gtNewNothingNode()); } //------------------------------------------------------------------------ // fgInlinePrependStatements: prepend statements needed to match up // caller and inlined callee // // Arguments: // inlineInfo -- info for the inline // // Return Value: // The last statement that was added, or the original call if no // statements were added. // // Notes: // Statements prepended may include the following: // * This pointer null check // * Class initialization // * Zeroing of must-init locals in the callee // * Passing of call arguments via temps // // Newly added statements are placed just after the original call // and are are given the same inline context as the call any calls // added here will appear to have been part of the immediate caller. Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) { BasicBlock* block = inlineInfo->iciBlock; Statement* callStmt = inlineInfo->iciStmt; const DebugInfo& callDI = callStmt->GetDebugInfo(); Statement* postStmt = callStmt->GetNextStmt(); Statement* afterStmt = callStmt; // afterStmt is the place where the new statements should be inserted after. Statement* newStmt = nullptr; GenTreeCall* call = inlineInfo->iciCall->AsCall(); noway_assert(call->gtOper == GT_CALL); #ifdef DEBUG if (0 && verbose) { printf("\nfgInlinePrependStatements for iciCall= "); printTreeID(call); printf(":\n"); } #endif // Prepend statements for any initialization / side effects InlArgInfo* inlArgInfo = inlineInfo->inlArgInfo; InlLclVarInfo* lclVarInfo = inlineInfo->lclVarInfo; GenTree* tree; // Create the null check statement (but not appending it to the statement list yet) for the 'this' pointer if // necessary. // The NULL check should be done after "argument setup statements". // The only reason we move it here is for calling "impInlineFetchArg(0,..." to reserve a temp // for the "this" pointer. // Note: Here we no longer do the optimization that was done by thisDereferencedFirst in the old inliner. // However the assetionProp logic will remove any unecessary null checks that we may have added // GenTree* nullcheck = nullptr; if (call->gtFlags & GTF_CALL_NULLCHECK && !inlineInfo->thisDereferencedFirst) { // Call impInlineFetchArg to "reserve" a temp for the "this" pointer. GenTree* thisOp = impInlineFetchArg(0, inlArgInfo, lclVarInfo); if (fgAddrCouldBeNull(thisOp)) { nullcheck = gtNewNullCheck(thisOp, block); // The NULL-check statement will be inserted to the statement list after those statements // that assign arguments to temps and before the actual body of the inlinee method. 
} } /* Treat arguments that had to be assigned to temps */ if (inlineInfo->argCnt) { #ifdef DEBUG if (verbose) { printf("\nArguments setup:\n"); } #endif // DEBUG for (unsigned argNum = 0; argNum < inlineInfo->argCnt; argNum++) { const InlArgInfo& argInfo = inlArgInfo[argNum]; const bool argIsSingleDef = !argInfo.argHasLdargaOp && !argInfo.argHasStargOp; GenTree* argNode = inlArgInfo[argNum].argNode; const bool argHasPutArg = argNode->OperIs(GT_PUTARG_TYPE); BasicBlockFlags bbFlags = BBF_EMPTY; argNode = argNode->gtSkipPutArgType(); argNode = argNode->gtRetExprVal(&bbFlags); if (argInfo.argHasTmp) { noway_assert(argInfo.argIsUsed); /* argBashTmpNode is non-NULL iff the argument's value was referenced exactly once by the original IL. This offers an opportunity to avoid an intermediate temp and just insert the original argument tree. However, if the temp node has been cloned somewhere while importing (e.g. when handling isinst or dup), or if the IL took the address of the argument, then argBashTmpNode will be set (because the value was only explicitly retrieved once) but the optimization cannot be applied. */ GenTree* argSingleUseNode = argInfo.argBashTmpNode; // argHasPutArg disqualifies the arg from a direct substitution because we don't have information about // its user. For example: replace `LCL_VAR short` with `PUTARG_TYPE short->LCL_VAR int`, // we should keep `PUTARG_TYPE` iff the user is a call that needs `short` and delete it otherwise. if ((argSingleUseNode != nullptr) && !(argSingleUseNode->gtFlags & GTF_VAR_CLONED) && argIsSingleDef && !argHasPutArg) { // Change the temp in-place to the actual argument. // We currently do not support this for struct arguments, so it must not be a GT_OBJ. assert(argNode->gtOper != GT_OBJ); argSingleUseNode->ReplaceWith(argNode, this); continue; } else { // We're going to assign the argument value to the // temp we use for it in the inline body. const unsigned tmpNum = argInfo.argTmpNum; const var_types argType = lclVarInfo[argNum].lclTypeInfo; // Create the temp assignment for this argument CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE; if (varTypeIsStruct(argType)) { structHnd = gtGetStructHandleIfPresent(argNode); noway_assert((structHnd != NO_CLASS_HANDLE) || (argType != TYP_STRUCT)); } // Unsafe value cls check is not needed for // argTmpNum here since in-linee compiler instance // would have iterated over these and marked them // accordingly. impAssignTempGen(tmpNum, argNode, structHnd, (unsigned)CHECK_SPILL_NONE, &afterStmt, callDI, block); // We used to refine the temp type here based on // the actual arg, but we now do this up front, when // creating the temp, over in impInlineFetchArg. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { gtDispStmt(afterStmt); } #endif // DEBUG } block->bbFlags |= (bbFlags & BBF_SPLIT_GAINED); } else if (argInfo.argIsByRefToStructLocal) { // Do nothing. Arg was directly substituted as we read // the inlinee. 
} else { /* The argument is either not used or a const or lcl var */ noway_assert(!argInfo.argIsUsed || argInfo.argIsInvariant || argInfo.argIsLclVar); /* Make sure we didnt change argNode's along the way, or else subsequent uses of the arg would have worked with the bashed value */ if (argInfo.argIsInvariant) { assert(argNode->OperIsConst() || argNode->gtOper == GT_ADDR); } noway_assert((argInfo.argIsLclVar == 0) == (argNode->gtOper != GT_LCL_VAR || (argNode->gtFlags & GTF_GLOB_REF))); /* If the argument has side effects, append it */ if (argInfo.argHasSideEff) { noway_assert(argInfo.argIsUsed == false); newStmt = nullptr; bool append = true; if (argNode->gtOper == GT_OBJ || argNode->gtOper == GT_MKREFANY) { // Don't put GT_OBJ node under a GT_COMMA. // Codegen can't deal with it. // Just hang the address here in case there are side-effect. newStmt = gtNewStmt(gtUnusedValNode(argNode->AsOp()->gtOp1), callDI); } else { // In some special cases, unused args with side effects can // trigger further changes. // // (1) If the arg is a static field access and the field access // was produced by a call to EqualityComparer<T>.get_Default, the // helper call to ensure the field has a value can be suppressed. // This helper call is marked as a "Special DCE" helper during // importation, over in fgGetStaticsCCtorHelper. // // (2) NYI. If, after tunneling through GT_RET_VALs, we find that // the actual arg expression has no side effects, we can skip // appending all together. This will help jit TP a bit. // // Chase through any GT_RET_EXPRs to find the actual argument // expression. GenTree* actualArgNode = argNode->gtRetExprVal(&bbFlags); // For case (1) // // Look for the following tree shapes // prejit: (IND (ADD (CONST, CALL(special dce helper...)))) // jit : (COMMA (CALL(special dce helper...), (FIELD ...))) if (actualArgNode->gtOper == GT_COMMA) { // Look for (COMMA (CALL(special dce helper...), (FIELD ...))) GenTree* op1 = actualArgNode->AsOp()->gtOp1; GenTree* op2 = actualArgNode->AsOp()->gtOp2; if (op1->IsCall() && ((op1->AsCall()->gtCallMoreFlags & GTF_CALL_M_HELPER_SPECIAL_DCE) != 0) && (op2->gtOper == GT_FIELD) && ((op2->gtFlags & GTF_EXCEPT) == 0)) { JITDUMP("\nPerforming special dce on unused arg [%06u]:" " actual arg [%06u] helper call [%06u]\n", argNode->gtTreeID, actualArgNode->gtTreeID, op1->gtTreeID); // Drop the whole tree append = false; } } else if (actualArgNode->gtOper == GT_IND) { // Look for (IND (ADD (CONST, CALL(special dce helper...)))) GenTree* addr = actualArgNode->AsOp()->gtOp1; if (addr->gtOper == GT_ADD) { GenTree* op1 = addr->AsOp()->gtOp1; GenTree* op2 = addr->AsOp()->gtOp2; if (op1->IsCall() && ((op1->AsCall()->gtCallMoreFlags & GTF_CALL_M_HELPER_SPECIAL_DCE) != 0) && op2->IsCnsIntOrI()) { // Drop the whole tree JITDUMP("\nPerforming special dce on unused arg [%06u]:" " actual arg [%06u] helper call [%06u]\n", argNode->gtTreeID, actualArgNode->gtTreeID, op1->gtTreeID); append = false; } } } } if (!append) { assert(newStmt == nullptr); JITDUMP("Arg tree side effects were discardable, not appending anything for arg\n"); } else { // If we don't have something custom to append, // just append the arg node as an unused value. 
if (newStmt == nullptr) { newStmt = gtNewStmt(gtUnusedValNode(argNode), callDI); } fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; #ifdef DEBUG if (verbose) { gtDispStmt(afterStmt); } #endif // DEBUG } } else if (argNode->IsBoxedValue()) { // Try to clean up any unnecessary boxing side effects // since the box itself will be ignored. gtTryRemoveBoxUpstreamEffects(argNode); } block->bbFlags |= (bbFlags & BBF_SPLIT_GAINED); } } } // Add the CCTOR check if asked for. // Note: We no longer do the optimization that is done before by staticAccessedFirstUsingHelper in the old inliner. // Therefore we might prepend redundant call to HELPER.CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE // before the inlined method body, even if a static field of this type was accessed in the inlinee // using a helper before any other observable side-effect. if (inlineInfo->inlineCandidateInfo->initClassResult & CORINFO_INITCLASS_USE_HELPER) { CORINFO_CLASS_HANDLE exactClass = eeGetClassFromContext(inlineInfo->inlineCandidateInfo->exactContextHnd); tree = fgGetSharedCCtor(exactClass); newStmt = gtNewStmt(tree, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; } // Insert the nullcheck statement now. if (nullcheck) { newStmt = gtNewStmt(nullcheck, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; } // // Now zero-init inlinee locals // CORINFO_METHOD_INFO* InlineeMethodInfo = InlineeCompiler->info.compMethodInfo; unsigned lclCnt = InlineeMethodInfo->locals.numArgs; bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; // If the callee contains zero-init locals, we need to explicitly initialize them if we are // in a loop or if the caller doesn't have compInitMem set. Otherwise we can rely on the // normal logic in the caller to insert zero-init in the prolog if necessary. if ((lclCnt != 0) && ((InlineeMethodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0) && ((bbInALoop && !bbIsReturn) || !info.compInitMem)) { #ifdef DEBUG if (verbose) { printf("\nZero init inlinee locals:\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < lclCnt; lclNum++) { unsigned tmpNum = inlineInfo->lclTmpNum[lclNum]; // If the local is used check whether we need to insert explicit zero initialization. if (tmpNum != BAD_VAR_NUM) { LclVarDsc* const tmpDsc = lvaGetDesc(tmpNum); if (!fgVarNeedsExplicitZeroInit(tmpNum, bbInALoop, bbIsReturn)) { JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", tmpNum); tmpDsc->lvSuppressedZeroInit = 1; compSuppressedZeroInit = true; continue; } var_types lclTyp = (var_types)lvaTable[tmpNum].lvType; noway_assert(lclTyp == lclVarInfo[lclNum + inlineInfo->argCnt].lclTypeInfo); if (!varTypeIsStruct(lclTyp)) { // Unsafe value cls check is not needed here since in-linee compiler instance would have // iterated over locals and marked accordingly. 
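// Roughly: a non-struct local gets a simple 'tmpN = 0' assignment here, while the struct
// case below clears the whole local with an init block (gtNewBlkOpNode with a zero value).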
impAssignTempGen(tmpNum, gtNewZeroConNode(genActualType(lclTyp)), NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_NONE, &afterStmt, callDI, block); } else { tree = gtNewBlkOpNode(gtNewLclvNode(tmpNum, lclTyp), // Dest gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock newStmt = gtNewStmt(tree, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; } #ifdef DEBUG if (verbose) { gtDispStmt(afterStmt); } #endif // DEBUG } } } return afterStmt; } //------------------------------------------------------------------------ // fgInlineAppendStatements: Append statements that are needed // after the inlined call. // // Arguments: // inlineInfo - information about the inline // block - basic block for the new statements // stmtAfter - (optional) insertion point for mid-block cases // // Notes: // If the call we're inlining is in tail position then // we skip nulling the locals, since it can interfere // with tail calls introduced by the local. void Compiler::fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmtAfter) { // Null out any gc ref locals if (!inlineInfo->HasGcRefLocals()) { // No ref locals, nothing to do. JITDUMP("fgInlineAppendStatements: no gc ref inline locals.\n"); return; } if (inlineInfo->iciCall->IsImplicitTailCall()) { JITDUMP("fgInlineAppendStatements: implicit tail call; skipping nulling.\n"); return; } JITDUMP("fgInlineAppendStatements: nulling out gc ref inlinee locals.\n"); Statement* callStmt = inlineInfo->iciStmt; const DebugInfo& callDI = callStmt->GetDebugInfo(); CORINFO_METHOD_INFO* InlineeMethodInfo = InlineeCompiler->info.compMethodInfo; const unsigned lclCnt = InlineeMethodInfo->locals.numArgs; InlLclVarInfo* lclVarInfo = inlineInfo->lclVarInfo; unsigned gcRefLclCnt = inlineInfo->numberOfGcRefLocals; const unsigned argCnt = inlineInfo->argCnt; for (unsigned lclNum = 0; lclNum < lclCnt; lclNum++) { // Is the local a gc ref type? Need to look at the // inline info for this since we will not have local // temps for unused inlinee locals. const var_types lclTyp = lclVarInfo[argCnt + lclNum].lclTypeInfo; if (!varTypeIsGC(lclTyp)) { // Nope, nothing to null out. continue; } // Ensure we're examining just the right number of locals. assert(gcRefLclCnt > 0); gcRefLclCnt--; // Fetch the temp for this inline local const unsigned tmpNum = inlineInfo->lclTmpNum[lclNum]; // Is the local used at all? if (tmpNum == BAD_VAR_NUM) { // Nope, nothing to null out. continue; } // Local was used, make sure the type is consistent. assert(lvaTable[tmpNum].lvType == lclTyp); // Does the local we're about to null out appear in the return // expression? If so we somehow messed up and didn't properly // spill the return value. See impInlineFetchLocal. GenTree* retExpr = inlineInfo->retExpr; if (retExpr != nullptr) { const bool interferesWithReturn = gtHasRef(inlineInfo->retExpr, tmpNum); noway_assert(!interferesWithReturn); } // Assign null to the local. GenTree* nullExpr = gtNewTempAssign(tmpNum, gtNewZeroConNode(lclTyp)); Statement* nullStmt = gtNewStmt(nullExpr, callDI); if (stmtAfter == nullptr) { fgInsertStmtAtBeg(block, nullStmt); } else { fgInsertStmtAfter(block, stmtAfter, nullStmt); } stmtAfter = nullStmt; #ifdef DEBUG if (verbose) { gtDispStmt(nullStmt); } #endif // DEBUG } // There should not be any GC ref locals left to null out. assert(gcRefLclCnt == 0); } //------------------------------------------------------------------------ // fgNeedReturnSpillTemp: Answers does the inlinee need to spill all returns // as a temp. 
// // Return Value: // true if the inlinee has to spill return exprs. bool Compiler::fgNeedReturnSpillTemp() { assert(compIsForInlining()); return (lvaInlineeReturnSpillTemp != BAD_VAR_NUM); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif // Flowgraph Inline Support /*****************************************************************************/ //------------------------------------------------------------------------ // fgCheckForInlineDepthAndRecursion: compute depth of the candidate, and // check for recursion. // // Return Value: // The depth of the inline candidate. The root method is a depth 0, top-level // candidates at depth 1, etc. // // Notes: // We generally disallow recursive inlines by policy. However, they are // supported by the underlying machinery. // // Likewise the depth limit is a policy consideration, and serves mostly // as a safeguard to prevent runaway inlining of small methods. // unsigned Compiler::fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo) { BYTE* candidateCode = inlineInfo->inlineCandidateInfo->methInfo.ILCode; InlineContext* inlineContext = inlineInfo->inlineCandidateInfo->inlinersContext; InlineResult* inlineResult = inlineInfo->inlineResult; // There should be a context for all candidates. assert(inlineContext != nullptr); int depth = 0; for (; inlineContext != nullptr; inlineContext = inlineContext->GetParent()) { assert(inlineContext->GetCode() != nullptr); depth++; if (inlineContext->GetCode() == candidateCode) { // This inline candidate has the same IL code buffer as an already // inlined method does. inlineResult->NoteFatal(InlineObservation::CALLSITE_IS_RECURSIVE); // No need to note CALLSITE_DEPTH we're already rejecting this candidate return depth; } if (depth > InlineStrategy::IMPLEMENTATION_MAX_INLINE_DEPTH) { break; } } inlineResult->NoteInt(InlineObservation::CALLSITE_DEPTH, depth); return depth; } //------------------------------------------------------------------------ // fgInline - expand inline candidates // // Returns: // phase status indicating if anything was modified // // Notes: // Inline candidates are identified during importation and candidate calls // must be top-level expressions. In input IR, the result of the call (if any) // is consumed elsewhere by a GT_RET_EXPR node. // // For successful inlines, calls are replaced by a sequence of argument setup // instructions, the inlined method body, and return value cleanup. Note // Inlining may introduce new inline candidates. These are processed in a // depth-first fashion, as the inliner walks the IR in statement order. // // After inline expansion in a statement, the statement tree // is walked to locate GT_RET_EXPR nodes. These are replaced by either // * the original call tree, if the inline failed // * the return value tree from the inlinee, if the inline succeeded // // This replacement happens in preorder; on the postorder side of the same // tree walk, we look for opportunties to devirtualize or optimize now that // we know the context for the newly supplied return value tree. // // Inline arguments may be directly substituted into the body of the inlinee // in some cases. See impInlineFetchArg. 
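//
// As a rough example: for a caller statement 'x = Add(a, b)' whose call inlines successfully,
// the call statement is replaced by the argument setup and the inlinee body, and the
// GT_RET_EXPR consumed by the assignment to 'x' is later rewritten to the inlinee's return
// expression (here, something like 'a + b') by fgUpdateInlineReturnExpressionPlaceHolder.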
// PhaseStatus Compiler::fgInline() { if (!opts.OptEnabled(CLFLG_INLINING)) { return PhaseStatus::MODIFIED_NOTHING; } #ifdef DEBUG fgPrintInlinedMethods = JitConfig.JitPrintInlinedMethods().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args); #endif // DEBUG noway_assert(fgFirstBB != nullptr); BasicBlock* block = fgFirstBB; bool madeChanges = false; do { // Make the current basic block address available globally compCurBB = block; for (Statement* const stmt : block->Statements()) { #if defined(DEBUG) || defined(INLINE_DATA) // In debug builds we want the inline tree to show all failed // inlines. Some inlines may fail very early and never make it to // candidate stage. So scan the tree looking for those early failures. fgWalkTreePre(stmt->GetRootNodePointer(), fgFindNonInlineCandidate, stmt); #endif GenTree* expr = stmt->GetRootNode(); // The importer ensures that all inline candidates are // statement expressions. So see if we have a call. if (expr->IsCall()) { GenTreeCall* call = expr->AsCall(); // We do. Is it an inline candidate? // // Note we also process GuardeDevirtualizationCandidates here as we've // split off GT_RET_EXPRs for them even when they are not inline candidates // as we need similar processing to ensure they get patched back to where // they belong. if (call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate()) { InlineResult inlineResult(this, call, stmt, "fgInline"); fgMorphStmt = stmt; fgMorphCallInline(call, &inlineResult); // If there's a candidate to process, we will make changes madeChanges = true; // fgMorphCallInline may have updated the // statement expression to a GT_NOP if the // call returned a value, regardless of // whether the inline succeeded or failed. // // If so, remove the GT_NOP and continue // on with the next statement. if (stmt->GetRootNode()->IsNothingNode()) { fgRemoveStmt(block, stmt); continue; } } } // See if we need to replace some return value place holders. // Also, see if this replacement enables further devirtualization. // // Note we have both preorder and postorder callbacks here. // // The preorder callback is responsible for replacing GT_RET_EXPRs // with the appropriate expansion (call or inline result). // Replacement may introduce subtrees with GT_RET_EXPR and so // we rely on the preorder to recursively process those as well. // // On the way back up, the postorder callback then re-examines nodes for // possible further optimization, as the (now complete) GT_RET_EXPR // replacement may have enabled optimizations by providing more // specific types for trees or variables. fgWalkTree(stmt->GetRootNodePointer(), fgUpdateInlineReturnExpressionPlaceHolder, fgLateDevirtualization, (void*)&madeChanges); // See if stmt is of the form GT_COMMA(call, nop) // If yes, we can get rid of GT_COMMA. if (expr->OperGet() == GT_COMMA && expr->AsOp()->gtOp1->OperGet() == GT_CALL && expr->AsOp()->gtOp2->OperGet() == GT_NOP) { madeChanges = true; stmt->SetRootNode(expr->AsOp()->gtOp1); } } block = block->bbNext; } while (block); #ifdef DEBUG // Check that we should not have any inline candidate or return value place holder left. 
block = fgFirstBB; noway_assert(block); do { for (Statement* const stmt : block->Statements()) { // Call Compiler::fgDebugCheckInlineCandidates on each node fgWalkTreePre(stmt->GetRootNodePointer(), fgDebugCheckInlineCandidates); } block = block->bbNext; } while (block); fgVerifyHandlerTab(); if (verbose || fgPrintInlinedMethods) { JITDUMP("**************** Inline Tree"); printf("\n"); m_inlineStrategy->Dump(verbose || JitConfig.JitPrintInlinedMethodsVerbose()); } #endif // DEBUG return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING; } #if defined(DEBUG) || defined(INLINE_DATA) //------------------------------------------------------------------------ // fgFindNonInlineCandidate: tree walk helper to ensure that a tree node // that is not an inline candidate is noted as a failed inline. // // Arguments: // pTree - pointer to pointer tree node being walked // data - contextual data for the walk // // Return Value: // walk result // // Note: // Invokes fgNoteNonInlineCandidate on the nodes it finds. Compiler::fgWalkResult Compiler::fgFindNonInlineCandidate(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; if (tree->gtOper == GT_CALL) { Compiler* compiler = data->compiler; Statement* stmt = (Statement*)data->pCallbackData; GenTreeCall* call = tree->AsCall(); compiler->fgNoteNonInlineCandidate(stmt, call); } return WALK_CONTINUE; } //------------------------------------------------------------------------ // fgNoteNonInlineCandidate: account for inlining failures in calls // not marked as inline candidates. // // Arguments: // stmt - statement containing the call // call - the call itself // // Notes: // Used in debug only to try and place descriptions of inline failures // into the proper context in the inline tree. void Compiler::fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call) { if (call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate()) { return; } InlineResult inlineResult(this, call, nullptr, "fgNoteNonInlineCandidate"); InlineObservation currentObservation = InlineObservation::CALLSITE_NOT_CANDIDATE; // Try and recover the reason left behind when the jit decided // this call was not a candidate. InlineObservation priorObservation = call->gtInlineObservation; if (InlIsValidObservation(priorObservation)) { currentObservation = priorObservation; } // Propagate the prior failure observation to this result. inlineResult.NotePriorFailure(currentObservation); inlineResult.SetReported(); if (call->gtCallType == CT_USER_FUNC) { m_inlineStrategy->NewContext(call->gtInlineContext, stmt, call)->SetFailed(&inlineResult); } } #endif #if FEATURE_MULTIREG_RET /********************************************************************************* * * tree - The node which needs to be converted to a struct pointer. * * Return the pointer by either __replacing__ the tree node with a suitable pointer * type or __without replacing__ and just returning a subtree or by __modifying__ * a subtree. */ GenTree* Compiler::fgGetStructAsStructPtr(GenTree* tree) { noway_assert(tree->OperIs(GT_LCL_VAR, GT_FIELD, GT_IND, GT_BLK, GT_OBJ, GT_COMMA) || tree->OperIsSIMD() || tree->OperIsHWIntrinsic()); // GT_CALL, cannot get address of call. // GT_MKREFANY, inlining should've been aborted due to mkrefany opcode. 
// GT_RET_EXPR, cannot happen after fgUpdateInlineReturnExpressionPlaceHolder switch (tree->OperGet()) { case GT_BLK: case GT_OBJ: case GT_IND: return tree->AsOp()->gtOp1; case GT_COMMA: tree->AsOp()->gtOp2 = fgGetStructAsStructPtr(tree->AsOp()->gtOp2); tree->gtType = TYP_BYREF; return tree; default: return gtNewOperNode(GT_ADDR, TYP_BYREF, tree); } } /*************************************************************************************************** * child - The inlinee of the retExpr node. * retClsHnd - The struct class handle of the type of the inlinee. * * Assign the inlinee to a tmp, if it is a call, just assign it to a lclVar, else we can * use a copyblock to do the assignment. */ GenTree* Compiler::fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd) { assert(child->gtOper != GT_RET_EXPR && child->gtOper != GT_MKREFANY); unsigned tmpNum = lvaGrabTemp(false DEBUGARG("RetBuf for struct inline return candidates.")); lvaSetStruct(tmpNum, retClsHnd, false); var_types structType = lvaTable[tmpNum].lvType; GenTree* dst = gtNewLclvNode(tmpNum, structType); // If we have a call, we'd like it to be: V00 = call(), but first check if // we have a ", , , call()" -- this is very defensive as we may never get // an inlinee that is made of commas. If the inlinee is not a call, then // we use a copy block to do the assignment. GenTree* src = child; GenTree* lastComma = nullptr; while (src->gtOper == GT_COMMA) { lastComma = src; src = src->AsOp()->gtOp2; } GenTree* newInlinee = nullptr; if (src->gtOper == GT_CALL) { // If inlinee was just a call, new inlinee is v05 = call() newInlinee = gtNewAssignNode(dst, src); // When returning a multi-register value in a local var, make sure the variable is // marked as lvIsMultiRegRet, so it does not get promoted. if (src->AsCall()->HasMultiRegRetVal()) { lvaTable[tmpNum].lvIsMultiRegRet = true; } // If inlinee was comma, but a deeper call, new inlinee is (, , , v05 = call()) if (child->gtOper == GT_COMMA) { lastComma->AsOp()->gtOp2 = newInlinee; newInlinee = child; } } else { // Inlinee is not a call, so just create a copy block to the tmp. src = child; GenTree* dstAddr = fgGetStructAsStructPtr(dst); GenTree* srcAddr = fgGetStructAsStructPtr(src); newInlinee = gtNewCpObjNode(dstAddr, srcAddr, retClsHnd, false); } GenTree* production = gtNewLclvNode(tmpNum, structType); return gtNewOperNode(GT_COMMA, structType, newInlinee, production); } /*************************************************************************************************** * tree - The tree pointer that has one of its child nodes as retExpr. * child - The inlinee child. * retClsHnd - The struct class handle of the type of the inlinee. * * V04 = call() assignments are okay as we codegen it. Everything else needs to be a copy block or * would need a temp. For example, a cast(ldobj) will then be, cast(v05 = ldobj, v05); But it is * a very rare (or impossible) scenario that we'd have a retExpr transform into a ldobj other than * a lclVar/call. So it is not worthwhile to do pattern matching optimizations like addr(ldobj(op1)) * can just be op1. */ void Compiler::fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd) { // We are okay to have: // 1. V02 = call(); // 2. copyBlk(dstAddr, srcAddr); assert(tree->gtOper == GT_ASG); // We have an assignment, we codegen only V05 = call(). 
if (child->gtOper == GT_CALL && tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { // If it is a multireg return on x64/ux, the local variable should be marked as lvIsMultiRegRet if (child->AsCall()->HasMultiRegRetVal()) { unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); lvaTable[lclNum].lvIsMultiRegRet = true; } return; } GenTree* dstAddr = fgGetStructAsStructPtr(tree->AsOp()->gtOp1); GenTree* srcAddr = fgGetStructAsStructPtr( (child->gtOper == GT_CALL) ? fgAssignStructInlineeToVar(child, retClsHnd) // Assign to a variable if it is a call. : child); // Just get the address, if not a call. tree->ReplaceWith(gtNewCpObjNode(dstAddr, srcAddr, retClsHnd, false), this); } #endif // FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // fgUpdateInlineReturnExpressionPlaceHolder: callback to replace the // inline return expression placeholder. // // Arguments: // pTree -- pointer to tree to examine for updates // data -- context data for the tree walk // // Returns: // fgWalkResult indicating the walk should continue; that // is we wish to fully explore the tree. // // Notes: // Looks for GT_RET_EXPR nodes that arose from tree splitting done // during importation for inline candidates, and replaces them. // // For successful inlines, substitutes the return value expression // from the inline body for the GT_RET_EXPR. // // For failed inlines, rejoins the original call into the tree from // whence it was split during importation. // // The code doesn't actually know if the corresponding inline // succeeded or not; it relies on the fact that gtInlineCandidate // initially points back at the call and is modified in place to // the inlinee return expression if the inline is successful (see // tail end of fgInsertInlineeBlocks for the update of iciCall). // // If the return type is a struct type and we're on a platform // where structs can be returned in multiple registers, ensure the // call has a suitable parent. // // If the original call type and the substitution type are different // the functions makes necessary updates. It could happen if there was // an implicit conversion in the inlinee body. // Compiler::fgWalkResult Compiler::fgUpdateInlineReturnExpressionPlaceHolder(GenTree** pTree, fgWalkData* data) { // All the operations here and in the corresponding postorder // callback (fgLateDevirtualization) are triggered by GT_CALL or // GT_RET_EXPR trees, and these (should) have the call side // effect flag. // // So bail out for any trees that don't have this flag. GenTree* tree = *pTree; if ((tree->gtFlags & GTF_CALL) == 0) { return WALK_SKIP_SUBTREES; } bool* madeChanges = static_cast<bool*>(data->pCallbackData); Compiler* comp = data->compiler; CORINFO_CLASS_HANDLE retClsHnd = NO_CLASS_HANDLE; while (tree->OperGet() == GT_RET_EXPR) { // We are going to copy the tree from the inlinee, // so record the handle now. // if (varTypeIsStruct(tree)) { retClsHnd = tree->AsRetExpr()->gtRetClsHnd; } // Skip through chains of GT_RET_EXPRs (say from nested inlines) // to the actual tree to use. // // Also we might as well try and fold the return value. // Eg returns of constant bools will have CASTS. // This folding may uncover more GT_RET_EXPRs, so we loop around // until we've got something distinct. 
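// (For instance, a chain like RET_EXPR -> RET_EXPR -> CAST(CNS_INT 1) coming from nested
// inlines of a bool-returning method can fold down to a plain constant here.)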
// BasicBlockFlags bbFlags = BBF_EMPTY; GenTree* inlineCandidate = tree->gtRetExprVal(&bbFlags); inlineCandidate = comp->gtFoldExpr(inlineCandidate); var_types retType = tree->TypeGet(); #ifdef DEBUG if (comp->verbose) { printf("\nReplacing the return expression placeholder "); printTreeID(tree); printf(" with "); printTreeID(inlineCandidate); printf("\n"); // Dump out the old return expression placeholder it will be overwritten by the ReplaceWith below comp->gtDispTree(tree); } #endif // DEBUG var_types newType = inlineCandidate->TypeGet(); // If we end up swapping type we may need to retype the tree: if (retType != newType) { if ((retType == TYP_BYREF) && (tree->OperGet() == GT_IND)) { // - in an RVA static if we've reinterpreted it as a byref; assert(newType == TYP_I_IMPL); JITDUMP("Updating type of the return GT_IND expression to TYP_BYREF\n"); inlineCandidate->gtType = TYP_BYREF; } else { // - under a call if we changed size of the argument. GenTree* putArgType = comp->fgCheckCallArgUpdate(data->parent, inlineCandidate, retType); if (putArgType != nullptr) { inlineCandidate = putArgType; } } } tree->ReplaceWith(inlineCandidate, comp); *madeChanges = true; comp->compCurBB->bbFlags |= (bbFlags & BBF_SPLIT_GAINED); #ifdef DEBUG if (comp->verbose) { printf("\nInserting the inline return expression\n"); comp->gtDispTree(tree); printf("\n"); } #endif // DEBUG } // If an inline was rejected and the call returns a struct, we may // have deferred some work when importing call for cases where the // struct is returned in register(s). // // See the bail-out clauses in impFixupCallStructReturn for inline // candidates. // // Do the deferred work now. if (retClsHnd != NO_CLASS_HANDLE) { structPassingKind howToReturnStruct; var_types returnType = comp->getReturnTypeForStruct(retClsHnd, CorInfoCallConvExtension::Managed, &howToReturnStruct); GenTree* parent = data->parent; switch (howToReturnStruct) { #if FEATURE_MULTIREG_RET // Is this a type that is returned in multiple registers // or a via a primitve type that is larger than the struct type? // if so we need to force into into a form we accept. // i.e. LclVar = call() case SPK_ByValue: case SPK_ByValueAsHfa: { // See assert below, we only look one level above for an asg parent. if (parent->gtOper == GT_ASG) { // Either lhs is a call V05 = call(); or lhs is addr, and asg becomes a copyBlk. comp->fgAttachStructInlineeToAsg(parent, tree, retClsHnd); } else { // Just assign the inlinee to a variable to keep it simple. tree->ReplaceWith(comp->fgAssignStructInlineeToVar(tree, retClsHnd), comp); } *madeChanges = true; } break; #endif // FEATURE_MULTIREG_RET case SPK_EnclosingType: case SPK_PrimitiveType: // No work needs to be done, the call has struct type and should keep it. break; case SPK_ByReference: // We should have already added the return buffer // when we first imported the call break; default: noway_assert(!"Unexpected struct passing kind"); break; } } #if FEATURE_MULTIREG_RET #if defined(DEBUG) // Make sure we don't have a tree like so: V05 = (, , , retExpr); // Since we only look one level above for the parent for '=' and // do not check if there is a series of COMMAs. See above. // Importer and FlowGraph will not generate such a tree, so just // leaving an assert in here. This can be fixed by looking ahead // when we visit GT_ASG similar to fgAttachStructInlineeToAsg. 
// if (tree->OperGet() == GT_ASG) { GenTree* value = tree->AsOp()->gtOp2; if (value->OperGet() == GT_COMMA) { GenTree* effectiveValue = value->gtEffectiveVal(/*commaOnly*/ true); noway_assert(!varTypeIsStruct(effectiveValue) || (effectiveValue->OperGet() != GT_RET_EXPR) || !comp->IsMultiRegReturnedType(effectiveValue->AsRetExpr()->gtRetClsHnd, CorInfoCallConvExtension::Managed)); } } #endif // defined(DEBUG) #endif // FEATURE_MULTIREG_RET return WALK_CONTINUE; } //------------------------------------------------------------------------ // fgLateDevirtualization: re-examine calls after inlining to see if we // can do more devirtualization // // Arguments: // pTree -- pointer to tree to examine for updates // data -- context data for the tree walk // // Returns: // fgWalkResult indicating the walk should continue; that // is we wish to fully explore the tree. // // Notes: // We used to check this opportunistically in the preorder callback for // calls where the `obj` was fed by a return, but we now re-examine // all calls. // // Late devirtualization (and eventually, perhaps, other type-driven // opts like cast optimization) can happen now because inlining or other // optimizations may have provided more accurate types than we saw when // first importing the trees. // // It would be nice to screen candidate sites based on the likelihood // that something has changed. Otherwise we'll waste some time retrying // an optimization that will just fail again. Compiler::fgWalkResult Compiler::fgLateDevirtualization(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; GenTree* parent = data->parent; Compiler* comp = data->compiler; bool* madeChanges = static_cast<bool*>(data->pCallbackData); // In some (rare) cases the parent node of tree will be smashed to a NOP during // the preorder by fgAttachStructToInlineeArg. // // jit\Methodical\VT\callconv\_il_reljumper3 for x64 linux // // If so, just bail out here. if (tree == nullptr) { assert((parent != nullptr) && parent->OperGet() == GT_NOP); return WALK_CONTINUE; } if (tree->OperGet() == GT_CALL) { GenTreeCall* call = tree->AsCall(); bool tryLateDevirt = call->IsVirtual() && (call->gtCallType == CT_USER_FUNC); #ifdef DEBUG tryLateDevirt = tryLateDevirt && (JitConfig.JitEnableLateDevirtualization() == 1); #endif // DEBUG if (tryLateDevirt) { #ifdef DEBUG if (comp->verbose) { printf("**** Late devirt opportunity\n"); comp->gtDispTree(call); } #endif // DEBUG CORINFO_CONTEXT_HANDLE context = nullptr; CORINFO_METHOD_HANDLE method = call->gtCallMethHnd; unsigned methodFlags = 0; const bool isLateDevirtualization = true; const bool explicitTailCall = call->IsTailPrefixedCall(); if ((call->gtCallMoreFlags & GTF_CALL_M_LATE_DEVIRT) != 0) { context = call->gtLateDevirtualizationInfo->exactContextHnd; call->gtLateDevirtualizationInfo = nullptr; } comp->impDevirtualizeCall(call, nullptr, &method, &methodFlags, &context, nullptr, isLateDevirtualization, explicitTailCall); *madeChanges = true; } } else if (tree->OperGet() == GT_ASG) { // If we're assigning to a ref typed local that has one definition, // we may be able to sharpen the type for the local. 
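// e.g. if the single def turns out to be 'new Derived()' after inlining, the local's class
// can be updated to Derived (exact), which may enable devirtualization of later calls on it.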
GenTree* const effLhs = tree->gtGetOp1()->gtEffectiveVal(); if ((effLhs->OperGet() == GT_LCL_VAR) && (effLhs->TypeGet() == TYP_REF)) { const unsigned lclNum = effLhs->AsLclVarCommon()->GetLclNum(); LclVarDsc* lcl = comp->lvaGetDesc(lclNum); if (lcl->lvSingleDef) { GenTree* rhs = tree->gtGetOp2(); bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE newClass = comp->gtGetClassHandle(rhs, &isExact, &isNonNull); if (newClass != NO_CLASS_HANDLE) { comp->lvaUpdateClass(lclNum, newClass, isExact); *madeChanges = true; } } } // If we created a self-assignment (say because we are sharing return spill temps) // we can remove it. // GenTree* const lhs = tree->gtGetOp1(); GenTree* const rhs = tree->gtGetOp2(); if (lhs->OperIs(GT_LCL_VAR) && GenTree::Compare(lhs, rhs)) { comp->gtUpdateNodeSideEffects(tree); assert((tree->gtFlags & GTF_SIDE_EFFECT) == GTF_ASG); JITDUMP("... removing self-assignment\n"); DISPTREE(tree); tree->gtBashToNOP(); *madeChanges = true; } } else if (tree->OperGet() == GT_JTRUE) { // See if this jtrue is now foldable. BasicBlock* block = comp->compCurBB; GenTree* condTree = tree->AsOp()->gtOp1; assert(tree == block->lastStmt()->GetRootNode()); if (condTree->OperGet() == GT_CNS_INT) { JITDUMP(" ... found foldable jtrue at [%06u] in " FMT_BB "\n", dspTreeID(tree), block->bbNum); noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0)); // We have a constant operand, and should have the all clear to optimize. // Update side effects on the tree, assert there aren't any, and bash to nop. comp->gtUpdateNodeSideEffects(tree); assert((tree->gtFlags & GTF_SIDE_EFFECT) == 0); tree->gtBashToNOP(); *madeChanges = true; BasicBlock* bNotTaken = nullptr; if (condTree->AsIntCon()->gtIconVal != 0) { block->bbJumpKind = BBJ_ALWAYS; bNotTaken = block->bbNext; } else { block->bbJumpKind = BBJ_NONE; bNotTaken = block->bbJumpDest; } comp->fgRemoveRefPred(bNotTaken, block); // If that was the last ref, a subsequent flow-opt pass // will clean up the now-unreachable bNotTaken, and any // other transitively unreachable blocks. if (bNotTaken->bbRefs == 0) { JITDUMP("... it looks like " FMT_BB " is now unreachable!\n", bNotTaken->bbNum); } } } else { const var_types retType = tree->TypeGet(); GenTree* foldedTree = comp->gtFoldExpr(tree); GenTree* putArgType = comp->fgCheckCallArgUpdate(data->parent, foldedTree, retType); if (putArgType != nullptr) { foldedTree = putArgType; } *pTree = foldedTree; *madeChanges = true; } return WALK_CONTINUE; } #ifdef DEBUG /***************************************************************************** * Callback to make sure there is no more GT_RET_EXPR and GTF_CALL_INLINE_CANDIDATE nodes. */ /* static */ Compiler::fgWalkResult Compiler::fgDebugCheckInlineCandidates(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; if (tree->gtOper == GT_CALL) { assert((tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) == 0); } else { assert(tree->gtOper != GT_RET_EXPR); } return WALK_CONTINUE; } #endif // DEBUG void Compiler::fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* inlineResult, InlineContext** createdContext) { noway_assert(call->gtOper == GT_CALL); noway_assert((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0); noway_assert(opts.OptEnabled(CLFLG_INLINING)); // This is the InlineInfo struct representing a method to be inlined. 
InlineInfo inlineInfo; memset(&inlineInfo, 0, sizeof(inlineInfo)); CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd; inlineInfo.fncHandle = fncHandle; inlineInfo.iciCall = call; inlineInfo.iciStmt = fgMorphStmt; inlineInfo.iciBlock = compCurBB; inlineInfo.thisDereferencedFirst = false; inlineInfo.retExpr = nullptr; inlineInfo.retBB = nullptr; inlineInfo.retExprClassHnd = nullptr; inlineInfo.retExprClassHndIsExact = false; inlineInfo.inlineResult = inlineResult; #ifdef FEATURE_SIMD inlineInfo.hasSIMDTypeArgLocalOrReturn = false; #endif // FEATURE_SIMD InlineCandidateInfo* inlineCandidateInfo = call->gtInlineCandidateInfo; noway_assert(inlineCandidateInfo); // Store the link to inlineCandidateInfo into inlineInfo inlineInfo.inlineCandidateInfo = inlineCandidateInfo; unsigned inlineDepth = fgCheckInlineDepthAndRecursion(&inlineInfo); if (inlineResult->IsFailure()) { #ifdef DEBUG if (verbose) { printf("Recursive or deep inline recursion detected. Will not expand this INLINECANDIDATE \n"); } #endif // DEBUG return; } // Set the trap to catch all errors (including recoverable ones from the EE) struct Param { Compiler* pThis; GenTree* call; CORINFO_METHOD_HANDLE fncHandle; InlineCandidateInfo* inlineCandidateInfo; InlineInfo* inlineInfo; } param; memset(&param, 0, sizeof(param)); param.pThis = this; param.call = call; param.fncHandle = fncHandle; param.inlineCandidateInfo = inlineCandidateInfo; param.inlineInfo = &inlineInfo; bool success = eeRunWithErrorTrap<Param>( [](Param* pParam) { // Init the local var info of the inlinee pParam->pThis->impInlineInitVars(pParam->inlineInfo); if (pParam->inlineInfo->inlineResult->IsCandidate()) { /* Clear the temp table */ memset(pParam->inlineInfo->lclTmpNum, -1, sizeof(pParam->inlineInfo->lclTmpNum)); // // Prepare the call to jitNativeCode // pParam->inlineInfo->InlinerCompiler = pParam->pThis; if (pParam->pThis->impInlineInfo == nullptr) { pParam->inlineInfo->InlineRoot = pParam->pThis; } else { pParam->inlineInfo->InlineRoot = pParam->pThis->impInlineInfo->InlineRoot; } // The inline context is part of debug info and must be created // before we start creating statements; we lazily create it as // late as possible, which is here. pParam->inlineInfo->inlineContext = pParam->inlineInfo->InlineRoot->m_inlineStrategy ->NewContext(pParam->inlineInfo->inlineCandidateInfo->inlinersContext, pParam->inlineInfo->iciStmt, pParam->inlineInfo->iciCall); pParam->inlineInfo->argCnt = pParam->inlineCandidateInfo->methInfo.args.totalILArgs(); pParam->inlineInfo->tokenLookupContextHandle = pParam->inlineCandidateInfo->exactContextHnd; JITLOG_THIS(pParam->pThis, (LL_INFO100000, "INLINER: inlineInfo.tokenLookupContextHandle for %s set to 0x%p:\n", pParam->pThis->eeGetMethodFullName(pParam->fncHandle), pParam->pThis->dspPtr(pParam->inlineInfo->tokenLookupContextHandle))); JitFlags compileFlagsForInlinee = *pParam->pThis->opts.jitFlags; // The following flags are lost when inlining. // (This is checked in Compiler::compInitOptions().) 
compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBINSTR); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_PROF_ENTERLEAVE); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_DEBUG_EnC); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_REVERSE_PINVOKE); compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_TRACK_TRANSITIONS); compileFlagsForInlinee.Set(JitFlags::JIT_FLAG_SKIP_VERIFICATION); #ifdef DEBUG if (pParam->pThis->verbose) { printf("\nInvoking compiler for the inlinee method %s :\n", pParam->pThis->eeGetMethodFullName(pParam->fncHandle)); } #endif // DEBUG int result = jitNativeCode(pParam->fncHandle, pParam->inlineCandidateInfo->methInfo.scope, pParam->pThis->info.compCompHnd, &pParam->inlineCandidateInfo->methInfo, (void**)pParam->inlineInfo, nullptr, &compileFlagsForInlinee, pParam->inlineInfo); if (result != CORJIT_OK) { // If we haven't yet determined why this inline fails, use // a catch-all something bad happened observation. InlineResult* innerInlineResult = pParam->inlineInfo->inlineResult; if (!innerInlineResult->IsFailure()) { innerInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_FAILURE); } } } }, &param); if (!success) { #ifdef DEBUG if (verbose) { printf("\nInlining failed due to an exception during invoking the compiler for the inlinee method %s.\n", eeGetMethodFullName(fncHandle)); } #endif // DEBUG // If we haven't yet determined why this inline fails, use // a catch-all something bad happened observation. if (!inlineResult->IsFailure()) { inlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); } } *createdContext = inlineInfo.inlineContext; if (inlineResult->IsFailure()) { return; } #ifdef DEBUG if (0 && verbose) { printf("\nDone invoking compiler for the inlinee method %s\n", eeGetMethodFullName(fncHandle)); } #endif // DEBUG // If there is non-NULL return, but we haven't set the pInlineInfo->retExpr, // That means we haven't imported any BB that contains CEE_RET opcode. // (This could happen for example for a BBJ_THROW block fall through a BBJ_RETURN block which // causes the BBJ_RETURN block not to be imported at all.) // Fail the inlining attempt if (inlineCandidateInfo->fncRetType != TYP_VOID && inlineInfo.retExpr == nullptr) { #ifdef DEBUG if (verbose) { printf("\nInlining failed because pInlineInfo->retExpr is not set in the inlinee method %s.\n", eeGetMethodFullName(fncHandle)); } #endif // DEBUG inlineResult->NoteFatal(InlineObservation::CALLEE_LACKS_RETURN); return; } // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // The inlining attempt cannot be failed starting from this point. // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // We've successfully obtain the list of inlinee's basic blocks. // Let's insert it to inliner's basic block list. fgInsertInlineeBlocks(&inlineInfo); #ifdef DEBUG if (verbose) { printf("Successfully inlined %s (%d IL bytes) (depth %d) [%s]\n", eeGetMethodFullName(fncHandle), inlineCandidateInfo->methInfo.ILCodeSize, inlineDepth, inlineResult->ReasonString()); } if (verbose) { printf("--------------------------------------------------------------------------------------------\n"); } #endif // DEBUG #if defined(DEBUG) impInlinedCodeSize += inlineCandidateInfo->methInfo.ILCodeSize; #endif // We inlined... inlineResult->NoteSuccess(); } //------------------------------------------------------------------------ // fgInsertInlineeBlocks: incorporate statements for an inline into the // root method. 
// // Arguments: // inlineInfo -- info for the inline // // Notes: // The inlining attempt cannot be failed once this method is called. // // Adds all inlinee statements, plus any glue statements needed // either before or after the inlined call. // // Updates flow graph and assigns weights to inlinee // blocks. Currently does not attempt to read IBC data for the // inlinee. // // Updates relevant root method status flags (eg optMethodFlags) to // include information from the inlinee. // // Marks newly added statements with an appropriate inline context. void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) { GenTreeCall* iciCall = pInlineInfo->iciCall; Statement* iciStmt = pInlineInfo->iciStmt; BasicBlock* iciBlock = pInlineInfo->iciBlock; noway_assert(iciBlock->bbStmtList != nullptr); noway_assert(iciStmt->GetRootNode() != nullptr); assert(iciStmt->GetRootNode() == iciCall); noway_assert(iciCall->gtOper == GT_CALL); #ifdef DEBUG Statement* currentDumpStmt = nullptr; if (verbose) { printf("\n\n----------- Statements (and blocks) added due to the inlining of call "); printTreeID(iciCall); printf(" -----------\n"); } #endif // DEBUG // Mark success. pInlineInfo->inlineContext->SetSucceeded(pInlineInfo); // Prepend statements Statement* stmtAfter = fgInlinePrependStatements(pInlineInfo); #ifdef DEBUG if (verbose) { currentDumpStmt = stmtAfter; printf("\nInlinee method body:"); } #endif // DEBUG BasicBlock* topBlock = iciBlock; BasicBlock* bottomBlock = nullptr; if (InlineeCompiler->fgBBcount == 1) { // When fgBBCount is 1 we will always have a non-NULL fgFirstBB // PREFAST_ASSUME(InlineeCompiler->fgFirstBB != nullptr); // DDB 91389: Don't throw away the (only) inlinee block // when its return type is not BBJ_RETURN. // In other words, we need its BBJ_ to perform the right thing. if (InlineeCompiler->fgFirstBB->bbJumpKind == BBJ_RETURN) { // Inlinee contains just one BB. So just insert its statement list to topBlock. if (InlineeCompiler->fgFirstBB->bbStmtList != nullptr) { stmtAfter = fgInsertStmtListAfter(iciBlock, stmtAfter, InlineeCompiler->fgFirstBB->firstStmt()); } // Copy inlinee bbFlags to caller bbFlags. const BasicBlockFlags inlineeBlockFlags = InlineeCompiler->fgFirstBB->bbFlags; noway_assert((inlineeBlockFlags & BBF_HAS_JMP) == 0); noway_assert((inlineeBlockFlags & BBF_KEEP_BBJ_ALWAYS) == 0); // Todo: we may want to exclude other flags here. iciBlock->bbFlags |= (inlineeBlockFlags & ~BBF_RUN_RARELY); #ifdef DEBUG if (verbose) { noway_assert(currentDumpStmt); if (currentDumpStmt != stmtAfter) { do { currentDumpStmt = currentDumpStmt->GetNextStmt(); printf("\n"); gtDispStmt(currentDumpStmt); printf("\n"); } while (currentDumpStmt != stmtAfter); } } #endif // DEBUG // Append statements to null out gc ref locals, if necessary. fgInlineAppendStatements(pInlineInfo, iciBlock, stmtAfter); goto _Done; } } // // ======= Inserting inlinee's basic blocks =============== // bottomBlock = fgNewBBafter(topBlock->bbJumpKind, topBlock, true); bottomBlock->bbRefs = 1; bottomBlock->bbJumpDest = topBlock->bbJumpDest; bottomBlock->inheritWeight(topBlock); topBlock->bbJumpKind = BBJ_NONE; // Update block flags { const BasicBlockFlags originalFlags = topBlock->bbFlags; noway_assert((originalFlags & BBF_SPLIT_NONEXIST) == 0); topBlock->bbFlags &= ~(BBF_SPLIT_LOST); bottomBlock->bbFlags |= originalFlags & BBF_SPLIT_GAINED; } // Split statements between topBlock and bottomBlock. 
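// Roughly: everything up to and including stmtAfter (the original call statement and the
// prepended setup statements) stays in topBlock; any remaining statements move to bottomBlock.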
// First figure out bottomBlock_Begin Statement* bottomBlock_Begin; bottomBlock_Begin = stmtAfter->GetNextStmt(); if (topBlock->bbStmtList == nullptr) { // topBlock is empty before the split. // In this case, both topBlock and bottomBlock should be empty noway_assert(bottomBlock_Begin == nullptr); topBlock->bbStmtList = nullptr; bottomBlock->bbStmtList = nullptr; } else if (topBlock->bbStmtList == bottomBlock_Begin) { noway_assert(bottomBlock_Begin != nullptr); // topBlock contains at least one statement before the split. // And the split is before the first statement. // In this case, topBlock should be empty, and everything else should be moved to the bottomBlock. bottomBlock->bbStmtList = topBlock->bbStmtList; topBlock->bbStmtList = nullptr; } else if (bottomBlock_Begin == nullptr) { noway_assert(topBlock->bbStmtList != nullptr); // topBlock contains at least one statement before the split. // And the split is at the end of the topBlock. // In this case, everything should be kept in the topBlock, and the bottomBlock should be empty bottomBlock->bbStmtList = nullptr; } else { noway_assert(topBlock->bbStmtList != nullptr); noway_assert(bottomBlock_Begin != nullptr); // This is the normal case where both blocks should contain at least one statement. Statement* topBlock_Begin = topBlock->firstStmt(); noway_assert(topBlock_Begin != nullptr); Statement* topBlock_End = bottomBlock_Begin->GetPrevStmt(); noway_assert(topBlock_End != nullptr); Statement* bottomBlock_End = topBlock->lastStmt(); noway_assert(bottomBlock_End != nullptr); // Break the linkage between 2 blocks. topBlock_End->SetNextStmt(nullptr); // Fix up all the pointers. topBlock->bbStmtList = topBlock_Begin; topBlock->bbStmtList->SetPrevStmt(topBlock_End); bottomBlock->bbStmtList = bottomBlock_Begin; bottomBlock->bbStmtList->SetPrevStmt(bottomBlock_End); } // // Set the try and handler index and fix the jump types of inlinee's blocks. // for (BasicBlock* const block : InlineeCompiler->Blocks()) { noway_assert(!block->hasTryIndex()); noway_assert(!block->hasHndIndex()); block->copyEHRegion(iciBlock); block->bbFlags |= iciBlock->bbFlags & BBF_BACKWARD_JUMP; DebugInfo di = iciStmt->GetDebugInfo().GetRoot(); if (di.IsValid()) { block->bbCodeOffs = di.GetLocation().GetOffset(); block->bbCodeOffsEnd = block->bbCodeOffs + 1; // TODO: is code size of 1 some magic number for inlining? } else { block->bbCodeOffs = 0; // TODO: why not BAD_IL_OFFSET? block->bbCodeOffsEnd = 0; block->bbFlags |= BBF_INTERNAL; } if (block->bbJumpKind == BBJ_RETURN) { noway_assert((block->bbFlags & BBF_HAS_JMP) == 0); if (block->bbNext) { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum, bottomBlock->bbNum); block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = bottomBlock; } else { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n", block->bbNum); block->bbJumpKind = BBJ_NONE; } } } // Insert inlinee's blocks into inliner's block list. topBlock->setNext(InlineeCompiler->fgFirstBB); InlineeCompiler->fgLastBB->setNext(bottomBlock); // // Add inlinee's block count to inliner's. // fgBBcount += InlineeCompiler->fgBBcount; // Append statements to null out gc ref locals, if necessary. fgInlineAppendStatements(pInlineInfo, bottomBlock, nullptr); #ifdef DEBUG if (verbose) { fgDispBasicBlocks(InlineeCompiler->fgFirstBB, InlineeCompiler->fgLastBB, true); } #endif // DEBUG _Done: // // At this point, we have successully inserted inlinee's code. 
// // // Copy out some flags // compLongUsed |= InlineeCompiler->compLongUsed; compFloatingPointUsed |= InlineeCompiler->compFloatingPointUsed; compLocallocUsed |= InlineeCompiler->compLocallocUsed; compLocallocOptimized |= InlineeCompiler->compLocallocOptimized; compQmarkUsed |= InlineeCompiler->compQmarkUsed; compGSReorderStackLayout |= InlineeCompiler->compGSReorderStackLayout; compHasBackwardJump |= InlineeCompiler->compHasBackwardJump; lvaGenericsContextInUse |= InlineeCompiler->lvaGenericsContextInUse; #ifdef FEATURE_SIMD if (InlineeCompiler->usesSIMDTypes()) { setUsesSIMDTypes(true); } #endif // FEATURE_SIMD // Update unmanaged call details info.compUnmanagedCallCountWithGCTransition += InlineeCompiler->info.compUnmanagedCallCountWithGCTransition; // Update stats for inlinee PGO // if (InlineeCompiler->fgPgoSchema != nullptr) { fgPgoInlineePgo++; } else if (InlineeCompiler->fgPgoFailReason != nullptr) { // Single block inlinees may not have probes // when we've ensabled minimal profiling (which // is now the default). // if (InlineeCompiler->fgBBcount == 1) { fgPgoInlineeNoPgoSingleBlock++; } else { fgPgoInlineeNoPgo++; } } // Update optMethodFlags CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG unsigned optMethodFlagsBefore = optMethodFlags; #endif optMethodFlags |= InlineeCompiler->optMethodFlags; #ifdef DEBUG if (optMethodFlags != optMethodFlagsBefore) { JITDUMP("INLINER: Updating optMethodFlags -- root:%0x callee:%0x new:%0x\n", optMethodFlagsBefore, InlineeCompiler->optMethodFlags, optMethodFlags); } #endif // If an inlinee needs GS cookie we need to make sure that the cookie will not be allocated at zero stack offset. // Note that if the root method needs GS cookie then this has already been taken care of. if (!getNeedsGSSecurityCookie() && InlineeCompiler->getNeedsGSSecurityCookie()) { setNeedsGSSecurityCookie(); const unsigned dummy = lvaGrabTempWithImplicitUse(false DEBUGARG("GSCookie dummy for inlinee")); LclVarDsc* gsCookieDummy = lvaGetDesc(dummy); gsCookieDummy->lvType = TYP_INT; gsCookieDummy->lvIsTemp = true; // It is not alive at all, set the flag to prevent zero-init. lvaSetVarDoNotEnregister(dummy DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } // If there is non-NULL return, replace the GT_CALL with its return value expression, // so later it will be picked up by the GT_RET_EXPR node. if ((pInlineInfo->inlineCandidateInfo->fncRetType != TYP_VOID) || (iciCall->gtReturnType == TYP_STRUCT)) { noway_assert(pInlineInfo->retExpr); #ifdef DEBUG if (verbose) { printf("\nReturn expression for call at "); printTreeID(iciCall); printf(" is\n"); gtDispTree(pInlineInfo->retExpr); } #endif // DEBUG // Replace the call with the return expression. Note that iciCall won't be part of the IR // but may still be referenced from a GT_RET_EXPR node. We will replace GT_RET_EXPR node // in fgUpdateInlineReturnExpressionPlaceHolder. At that time we will also update the flags // on the basic block of GT_RET_EXPR node. if (iciCall->gtInlineCandidateInfo->retExpr->OperGet() == GT_RET_EXPR) { // Save the basic block flags from the retExpr basic block. 
iciCall->gtInlineCandidateInfo->retExpr->AsRetExpr()->bbFlags = pInlineInfo->retBB->bbFlags; } if (bottomBlock != nullptr) { // We've split the iciblock into two and the RET_EXPR was possibly moved to the bottomBlock // so let's update its flags with retBB's ones bottomBlock->bbFlags |= pInlineInfo->retBB->bbFlags & BBF_COMPACT_UPD; } iciCall->ReplaceWith(pInlineInfo->retExpr, this); } // // Detach the GT_CALL node from the original statement by hanging a "nothing" node under it, // so that fgMorphStmts can remove the statement once we return from here. // iciStmt->SetRootNode(gtNewNothingNode()); } //------------------------------------------------------------------------ // fgInlinePrependStatements: prepend statements needed to match up // caller and inlined callee // // Arguments: // inlineInfo -- info for the inline // // Return Value: // The last statement that was added, or the original call if no // statements were added. // // Notes: // Statements prepended may include the following: // * This pointer null check // * Class initialization // * Zeroing of must-init locals in the callee // * Passing of call arguments via temps // // Newly added statements are placed just after the original call // and are are given the same inline context as the call any calls // added here will appear to have been part of the immediate caller. Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) { BasicBlock* block = inlineInfo->iciBlock; Statement* callStmt = inlineInfo->iciStmt; const DebugInfo& callDI = callStmt->GetDebugInfo(); Statement* postStmt = callStmt->GetNextStmt(); Statement* afterStmt = callStmt; // afterStmt is the place where the new statements should be inserted after. Statement* newStmt = nullptr; GenTreeCall* call = inlineInfo->iciCall->AsCall(); noway_assert(call->gtOper == GT_CALL); #ifdef DEBUG if (0 && verbose) { printf("\nfgInlinePrependStatements for iciCall= "); printTreeID(call); printf(":\n"); } #endif // Prepend statements for any initialization / side effects InlArgInfo* inlArgInfo = inlineInfo->inlArgInfo; InlLclVarInfo* lclVarInfo = inlineInfo->lclVarInfo; GenTree* tree; // Create the null check statement (but not appending it to the statement list yet) for the 'this' pointer if // necessary. // The NULL check should be done after "argument setup statements". // The only reason we move it here is for calling "impInlineFetchArg(0,..." to reserve a temp // for the "this" pointer. // Note: Here we no longer do the optimization that was done by thisDereferencedFirst in the old inliner. // However the assetionProp logic will remove any unecessary null checks that we may have added // GenTree* nullcheck = nullptr; if (call->gtFlags & GTF_CALL_NULLCHECK && !inlineInfo->thisDereferencedFirst) { // Call impInlineFetchArg to "reserve" a temp for the "this" pointer. GenTree* thisOp = impInlineFetchArg(0, inlArgInfo, lclVarInfo); if (fgAddrCouldBeNull(thisOp)) { nullcheck = gtNewNullCheck(thisOp, block); // The NULL-check statement will be inserted to the statement list after those statements // that assign arguments to temps and before the actual body of the inlinee method. 
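// For example, for a candidate call like 'obj.Foo(x)' this typically yields a
// NULLCHECK statement on the temp reserved for the 'this' argument, sitting after
// the argument-setup statements built below.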
} } /* Treat arguments that had to be assigned to temps */ if (inlineInfo->argCnt) { #ifdef DEBUG if (verbose) { printf("\nArguments setup:\n"); } #endif // DEBUG for (unsigned argNum = 0; argNum < inlineInfo->argCnt; argNum++) { const InlArgInfo& argInfo = inlArgInfo[argNum]; const bool argIsSingleDef = !argInfo.argHasLdargaOp && !argInfo.argHasStargOp; GenTree* argNode = inlArgInfo[argNum].argNode; const bool argHasPutArg = argNode->OperIs(GT_PUTARG_TYPE); BasicBlockFlags bbFlags = BBF_EMPTY; argNode = argNode->gtSkipPutArgType(); argNode = argNode->gtRetExprVal(&bbFlags); if (argInfo.argHasTmp) { noway_assert(argInfo.argIsUsed); /* argBashTmpNode is non-NULL iff the argument's value was referenced exactly once by the original IL. This offers an opportunity to avoid an intermediate temp and just insert the original argument tree. However, if the temp node has been cloned somewhere while importing (e.g. when handling isinst or dup), or if the IL took the address of the argument, then argBashTmpNode will be set (because the value was only explicitly retrieved once) but the optimization cannot be applied. */ GenTree* argSingleUseNode = argInfo.argBashTmpNode; // argHasPutArg disqualifies the arg from a direct substitution because we don't have information about // its user. For example: replace `LCL_VAR short` with `PUTARG_TYPE short->LCL_VAR int`, // we should keep `PUTARG_TYPE` iff the user is a call that needs `short` and delete it otherwise. if ((argSingleUseNode != nullptr) && !(argSingleUseNode->gtFlags & GTF_VAR_CLONED) && argIsSingleDef && !argHasPutArg) { // Change the temp in-place to the actual argument. // We currently do not support this for struct arguments, so it must not be a GT_OBJ. assert(argNode->gtOper != GT_OBJ); argSingleUseNode->ReplaceWith(argNode, this); continue; } else { // We're going to assign the argument value to the // temp we use for it in the inline body. const unsigned tmpNum = argInfo.argTmpNum; const var_types argType = lclVarInfo[argNum].lclTypeInfo; // Create the temp assignment for this argument CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE; if (varTypeIsStruct(argType)) { structHnd = gtGetStructHandleIfPresent(argNode); noway_assert((structHnd != NO_CLASS_HANDLE) || (argType != TYP_STRUCT)); } // Unsafe value cls check is not needed for // argTmpNum here since in-linee compiler instance // would have iterated over these and marked them // accordingly. impAssignTempGen(tmpNum, argNode, structHnd, (unsigned)CHECK_SPILL_NONE, &afterStmt, callDI, block); // We used to refine the temp type here based on // the actual arg, but we now do this up front, when // creating the temp, over in impInlineFetchArg. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { gtDispStmt(afterStmt); } #endif // DEBUG } block->bbFlags |= (bbFlags & BBF_SPLIT_GAINED); } else if (argInfo.argIsByRefToStructLocal) { // Do nothing. Arg was directly substituted as we read // the inlinee. 
} else { /* The argument is either not used or a const or lcl var */ noway_assert(!argInfo.argIsUsed || argInfo.argIsInvariant || argInfo.argIsLclVar); /* Make sure we didnt change argNode's along the way, or else subsequent uses of the arg would have worked with the bashed value */ if (argInfo.argIsInvariant) { assert(argNode->OperIsConst() || argNode->gtOper == GT_ADDR); } noway_assert((argInfo.argIsLclVar == 0) == (argNode->gtOper != GT_LCL_VAR || (argNode->gtFlags & GTF_GLOB_REF))); /* If the argument has side effects, append it */ if (argInfo.argHasSideEff) { noway_assert(argInfo.argIsUsed == false); newStmt = nullptr; bool append = true; if (argNode->gtOper == GT_OBJ || argNode->gtOper == GT_MKREFANY) { // Don't put GT_OBJ node under a GT_COMMA. // Codegen can't deal with it. // Just hang the address here in case there are side-effect. newStmt = gtNewStmt(gtUnusedValNode(argNode->AsOp()->gtOp1), callDI); } else { // In some special cases, unused args with side effects can // trigger further changes. // // (1) If the arg is a static field access and the field access // was produced by a call to EqualityComparer<T>.get_Default, the // helper call to ensure the field has a value can be suppressed. // This helper call is marked as a "Special DCE" helper during // importation, over in fgGetStaticsCCtorHelper. // // (2) NYI. If, after tunneling through GT_RET_VALs, we find that // the actual arg expression has no side effects, we can skip // appending all together. This will help jit TP a bit. // // Chase through any GT_RET_EXPRs to find the actual argument // expression. GenTree* actualArgNode = argNode->gtRetExprVal(&bbFlags); // For case (1) // // Look for the following tree shapes // prejit: (IND (ADD (CONST, CALL(special dce helper...)))) // jit : (COMMA (CALL(special dce helper...), (FIELD ...))) if (actualArgNode->gtOper == GT_COMMA) { // Look for (COMMA (CALL(special dce helper...), (FIELD ...))) GenTree* op1 = actualArgNode->AsOp()->gtOp1; GenTree* op2 = actualArgNode->AsOp()->gtOp2; if (op1->IsCall() && ((op1->AsCall()->gtCallMoreFlags & GTF_CALL_M_HELPER_SPECIAL_DCE) != 0) && (op2->gtOper == GT_FIELD) && ((op2->gtFlags & GTF_EXCEPT) == 0)) { JITDUMP("\nPerforming special dce on unused arg [%06u]:" " actual arg [%06u] helper call [%06u]\n", argNode->gtTreeID, actualArgNode->gtTreeID, op1->gtTreeID); // Drop the whole tree append = false; } } else if (actualArgNode->gtOper == GT_IND) { // Look for (IND (ADD (CONST, CALL(special dce helper...)))) GenTree* addr = actualArgNode->AsOp()->gtOp1; if (addr->gtOper == GT_ADD) { GenTree* op1 = addr->AsOp()->gtOp1; GenTree* op2 = addr->AsOp()->gtOp2; if (op1->IsCall() && ((op1->AsCall()->gtCallMoreFlags & GTF_CALL_M_HELPER_SPECIAL_DCE) != 0) && op2->IsCnsIntOrI()) { // Drop the whole tree JITDUMP("\nPerforming special dce on unused arg [%06u]:" " actual arg [%06u] helper call [%06u]\n", argNode->gtTreeID, actualArgNode->gtTreeID, op1->gtTreeID); append = false; } } } } if (!append) { assert(newStmt == nullptr); JITDUMP("Arg tree side effects were discardable, not appending anything for arg\n"); } else { // If we don't have something custom to append, // just append the arg node as an unused value. 
if (newStmt == nullptr) { newStmt = gtNewStmt(gtUnusedValNode(argNode), callDI); } fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; #ifdef DEBUG if (verbose) { gtDispStmt(afterStmt); } #endif // DEBUG } } else if (argNode->IsBoxedValue()) { // Try to clean up any unnecessary boxing side effects // since the box itself will be ignored. gtTryRemoveBoxUpstreamEffects(argNode); } block->bbFlags |= (bbFlags & BBF_SPLIT_GAINED); } } } // Add the CCTOR check if asked for. // Note: We no longer do the optimization that is done before by staticAccessedFirstUsingHelper in the old inliner. // Therefore we might prepend redundant call to HELPER.CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE // before the inlined method body, even if a static field of this type was accessed in the inlinee // using a helper before any other observable side-effect. if (inlineInfo->inlineCandidateInfo->initClassResult & CORINFO_INITCLASS_USE_HELPER) { CORINFO_CLASS_HANDLE exactClass = eeGetClassFromContext(inlineInfo->inlineCandidateInfo->exactContextHnd); tree = fgGetSharedCCtor(exactClass); newStmt = gtNewStmt(tree, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; } // Insert the nullcheck statement now. if (nullcheck) { newStmt = gtNewStmt(nullcheck, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; } // // Now zero-init inlinee locals // CORINFO_METHOD_INFO* InlineeMethodInfo = InlineeCompiler->info.compMethodInfo; unsigned lclCnt = InlineeMethodInfo->locals.numArgs; bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; // If the callee contains zero-init locals, we need to explicitly initialize them if we are // in a loop or if the caller doesn't have compInitMem set. Otherwise we can rely on the // normal logic in the caller to insert zero-init in the prolog if necessary. if ((lclCnt != 0) && ((InlineeMethodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0) && ((bbInALoop && !bbIsReturn) || !info.compInitMem)) { #ifdef DEBUG if (verbose) { printf("\nZero init inlinee locals:\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < lclCnt; lclNum++) { unsigned tmpNum = inlineInfo->lclTmpNum[lclNum]; // If the local is used check whether we need to insert explicit zero initialization. if (tmpNum != BAD_VAR_NUM) { LclVarDsc* const tmpDsc = lvaGetDesc(tmpNum); if (!fgVarNeedsExplicitZeroInit(tmpNum, bbInALoop, bbIsReturn)) { JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", tmpNum); tmpDsc->lvSuppressedZeroInit = 1; compSuppressedZeroInit = true; continue; } var_types lclTyp = (var_types)lvaTable[tmpNum].lvType; noway_assert(lclTyp == lclVarInfo[lclNum + inlineInfo->argCnt].lclTypeInfo); if (!varTypeIsStruct(lclTyp)) { // Unsafe value cls check is not needed here since in-linee compiler instance would have // iterated over locals and marked accordingly. 
impAssignTempGen(tmpNum, gtNewZeroConNode(genActualType(lclTyp)), NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_NONE, &afterStmt, callDI, block); } else { tree = gtNewBlkOpNode(gtNewLclvNode(tmpNum, lclTyp), // Dest gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock newStmt = gtNewStmt(tree, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); afterStmt = newStmt; } #ifdef DEBUG if (verbose) { gtDispStmt(afterStmt); } #endif // DEBUG } } } return afterStmt; } //------------------------------------------------------------------------ // fgInlineAppendStatements: Append statements that are needed // after the inlined call. // // Arguments: // inlineInfo - information about the inline // block - basic block for the new statements // stmtAfter - (optional) insertion point for mid-block cases // // Notes: // If the call we're inlining is in tail position then // we skip nulling the locals, since it can interfere // with tail calls introduced by the local. void Compiler::fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmtAfter) { // Null out any gc ref locals if (!inlineInfo->HasGcRefLocals()) { // No ref locals, nothing to do. JITDUMP("fgInlineAppendStatements: no gc ref inline locals.\n"); return; } if (inlineInfo->iciCall->IsImplicitTailCall()) { JITDUMP("fgInlineAppendStatements: implicit tail call; skipping nulling.\n"); return; } JITDUMP("fgInlineAppendStatements: nulling out gc ref inlinee locals.\n"); Statement* callStmt = inlineInfo->iciStmt; const DebugInfo& callDI = callStmt->GetDebugInfo(); CORINFO_METHOD_INFO* InlineeMethodInfo = InlineeCompiler->info.compMethodInfo; const unsigned lclCnt = InlineeMethodInfo->locals.numArgs; InlLclVarInfo* lclVarInfo = inlineInfo->lclVarInfo; unsigned gcRefLclCnt = inlineInfo->numberOfGcRefLocals; const unsigned argCnt = inlineInfo->argCnt; for (unsigned lclNum = 0; lclNum < lclCnt; lclNum++) { // Is the local a gc ref type? Need to look at the // inline info for this since we will not have local // temps for unused inlinee locals. const var_types lclTyp = lclVarInfo[argCnt + lclNum].lclTypeInfo; if (!varTypeIsGC(lclTyp)) { // Nope, nothing to null out. continue; } // Ensure we're examining just the right number of locals. assert(gcRefLclCnt > 0); gcRefLclCnt--; // Fetch the temp for this inline local const unsigned tmpNum = inlineInfo->lclTmpNum[lclNum]; // Is the local used at all? if (tmpNum == BAD_VAR_NUM) { // Nope, nothing to null out. continue; } // Local was used, make sure the type is consistent. assert(lvaTable[tmpNum].lvType == lclTyp); // Does the local we're about to null out appear in the return // expression? If so we somehow messed up and didn't properly // spill the return value. See impInlineFetchLocal. GenTree* retExpr = inlineInfo->retExpr; if (retExpr != nullptr) { const bool interferesWithReturn = gtHasRef(inlineInfo->retExpr, tmpNum); noway_assert(!interferesWithReturn); } // Assign null to the local. GenTree* nullExpr = gtNewTempAssign(tmpNum, gtNewZeroConNode(lclTyp)); Statement* nullStmt = gtNewStmt(nullExpr, callDI); if (stmtAfter == nullptr) { fgInsertStmtAtBeg(block, nullStmt); } else { fgInsertStmtAfter(block, stmtAfter, nullStmt); } stmtAfter = nullStmt; #ifdef DEBUG if (verbose) { gtDispStmt(nullStmt); } #endif // DEBUG } // There should not be any GC ref locals left to null out. assert(gcRefLclCnt == 0); } //------------------------------------------------------------------------ // fgNeedReturnSpillTemp: Answers does the inlinee need to spill all returns // as a temp. 
//
// Return Value:
//    true if the inlinee has to spill return exprs.
bool Compiler::fgNeedReturnSpillTemp()
{
    assert(compIsForInlining());
    return (lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
}
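// --- Editor's illustrative sketch (not part of the runtime sources above) ---
// The zero-init loop in fgInlinePrependStatements only materializes explicit
// zeroing when the callee has locals marked for zero-init (CORINFO_OPT_INIT_LOCALS)
// and either the call site sits in a loop (and is not a return block) or the
// caller lacks compInitMem. The standalone model below restates that predicate;
// needsExplicitZeroInit and its parameters are hypothetical stand-ins, not JIT APIs.
#include <cstdio>

static bool needsExplicitZeroInit(bool calleeInitLocals, bool bbInALoop, bool bbIsReturn, bool callerInitMem)
{
    // Mirrors: (options & CORINFO_OPT_INIT_LOCALS) != 0 && ((bbInALoop && !bbIsReturn) || !info.compInitMem)
    return calleeInitLocals && ((bbInALoop && !bbIsReturn) || !callerInitMem);
}

int main()
{
    // Call site inside a loop of a compInitMem caller: zeroing must be emitted inline.
    std::printf("loop, initMem caller    -> %d\n", needsExplicitZeroInit(true, true, false, true));
    // Straight-line call site in a compInitMem caller: prolog zeroing already covers it.
    std::printf("no loop, initMem caller -> %d\n", needsExplicitZeroInit(true, false, false, true));
    // Caller without compInitMem: the inliner must zero the callee's locals itself.
    std::printf("no loop, no initMem     -> %d\n", needsExplicitZeroInit(true, false, false, false));
    return 0;
}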
1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/jit/importer.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "corexcep.h" #define Verify(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ } \ } while (0) #define VerifyOrReturn(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return; \ } \ } while (0) #define VerifyOrReturnSpeculative(cond, msg, speculative) \ do \ { \ if (speculative) \ { \ if (!(cond)) \ { \ return false; \ } \ } \ else \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return false; \ } \ } \ } while (0) /*****************************************************************************/ void Compiler::impInit() { impStmtList = impLastStmt = nullptr; #ifdef DEBUG impInlinedCodeSize = 0; #endif // DEBUG } /***************************************************************************** * * Pushes the given tree on the stack. */ void Compiler::impPushOnStack(GenTree* tree, typeInfo ti) { /* Check for overflow. If inlining, we may be using a bigger stack */ if ((verCurrentState.esStackDepth >= info.compMaxStack) && (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0))) { BADCODE("stack overflow"); } #ifdef DEBUG // If we are pushing a struct, make certain we know the precise type! 
if (tree->TypeGet() == TYP_STRUCT) { assert(ti.IsType(TI_STRUCT)); CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle(); assert(clsHnd != NO_CLASS_HANDLE); } #endif // DEBUG verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti; verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree; if ((tree->gtType == TYP_LONG) && (compLongUsed == false)) { compLongUsed = true; } else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false)) { compFloatingPointUsed = true; } } inline void Compiler::impPushNullObjRefOnStack() { impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL)); } // This method gets called when we run into unverifiable code // (and we are verifying the method) inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { #ifdef DEBUG const char* tail = strrchr(file, '\\'); if (tail) { file = tail + 1; } if (JitConfig.JitBreakOnUnsafeCode()) { assert(!"Unsafe code detected"); } #endif JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); if (compIsForImportOnly()) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line)); } } inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); #ifdef DEBUG // BreakIfDebuggerPresent(); if (getBreakOnBadCode()) { assert(!"Typechecking error"); } #endif RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr); UNREACHABLE(); } // helper function that will tell us if the IL instruction at the addr passed // by param consumes an address at the top of the stack. We use it to save // us lvAddrTaken bool Compiler::impILConsumesAddr(const BYTE* codeAddr) { assert(!compIsForInlining()); OPCODE opcode; opcode = (OPCODE)getU1LittleEndian(codeAddr); switch (opcode) { // case CEE_LDFLDA: We're taking this one out as if you have a sequence // like // // ldloca.0 // ldflda whatever // // of a primitivelike struct, you end up after morphing with addr of a local // that's not marked as addrtaken, which is wrong. Also ldflda is usually used // for structs that contain other structs, which isnt a case we handle very // well now for other reasons. case CEE_LDFLD: { // We won't collapse small fields. This is probably not the right place to have this // check, but we're only using the function for this purpose, and is easy to factor // out if we need to do so. 
CORINFO_RESOLVED_TOKEN resolvedToken; impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field); var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField)); // Preserve 'small' int types if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } if (varTypeIsSmall(lclTyp)) { return false; } return true; } default: break; } return false; } void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind) { pResolvedToken->tokenContext = impTokenLookupContextHandle; pResolvedToken->tokenScope = info.compScopeHnd; pResolvedToken->token = getU4LittleEndian(addr); pResolvedToken->tokenType = kind; info.compCompHnd->resolveToken(pResolvedToken); } /***************************************************************************** * * Pop one tree from the stack. */ StackEntry Compiler::impPopStack() { if (verCurrentState.esStackDepth == 0) { BADCODE("stack underflow"); } return verCurrentState.esStack[--verCurrentState.esStackDepth]; } /***************************************************************************** * * Peep at n'th (0-based) tree on the top of the stack. */ StackEntry& Compiler::impStackTop(unsigned n) { if (verCurrentState.esStackDepth <= n) { BADCODE("stack underflow"); } return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1]; } unsigned Compiler::impStackHeight() { return verCurrentState.esStackDepth; } /***************************************************************************** * Some of the trees are spilled specially. While unspilling them, or * making a copy, these need to be handled specially. The function * enumerates the operators possible after spilling. */ #ifdef DEBUG // only used in asserts static bool impValidSpilledStackEntry(GenTree* tree) { if (tree->gtOper == GT_LCL_VAR) { return true; } if (tree->OperIsConst()) { return true; } return false; } #endif /***************************************************************************** * * The following logic is used to save/restore stack contents. * If 'copy' is true, then we make a copy of the trees on the stack. These * have to all be cloneable/spilled values. 
*/ void Compiler::impSaveStackState(SavedStack* savePtr, bool copy) { savePtr->ssDepth = verCurrentState.esStackDepth; if (verCurrentState.esStackDepth) { savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth]; size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees); if (copy) { StackEntry* table = savePtr->ssTrees; /* Make a fresh copy of all the stack entries */ for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++) { table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo; GenTree* tree = verCurrentState.esStack[level].val; assert(impValidSpilledStackEntry(tree)); switch (tree->gtOper) { case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_LCL_VAR: table->val = gtCloneExpr(tree); break; default: assert(!"Bad oper - Not covered by impValidSpilledStackEntry()"); break; } } } else { memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize); } } } void Compiler::impRestoreStackState(SavedStack* savePtr) { verCurrentState.esStackDepth = savePtr->ssDepth; if (verCurrentState.esStackDepth) { memcpy(verCurrentState.esStack, savePtr->ssTrees, verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack)); } } //------------------------------------------------------------------------ // impBeginTreeList: Get the tree list started for a new basic block. // inline void Compiler::impBeginTreeList() { assert(impStmtList == nullptr && impLastStmt == nullptr); } /***************************************************************************** * * Store the given start and end stmt in the given basic block. This is * mostly called by impEndTreeList(BasicBlock *block). It is called * directly only for handling CEE_LEAVEs out of finally-protected try's. */ inline void Compiler::impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt) { /* Make the list circular, so that we can easily walk it backwards */ firstStmt->SetPrevStmt(lastStmt); /* Store the tree list in the basic block */ block->bbStmtList = firstStmt; /* The block should not already be marked as imported */ assert((block->bbFlags & BBF_IMPORTED) == 0); block->bbFlags |= BBF_IMPORTED; } inline void Compiler::impEndTreeList(BasicBlock* block) { if (impStmtList == nullptr) { // The block should not already be marked as imported. assert((block->bbFlags & BBF_IMPORTED) == 0); // Empty block. Just mark it as imported. block->bbFlags |= BBF_IMPORTED; } else { impEndTreeList(block, impStmtList, impLastStmt); } #ifdef DEBUG if (impLastILoffsStmt != nullptr) { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } #endif impStmtList = impLastStmt = nullptr; } /***************************************************************************** * * Check that storing the given tree doesnt mess up the semantic order. Note * that this has only limited value as we can only check [0..chkLevel). 
*/ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) { #ifndef DEBUG return; #else if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE) { return; } GenTree* tree = stmt->GetRootNode(); // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack if (tree->gtFlags & GTF_CALL) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0); } } if (tree->gtOper == GT_ASG) { // For an assignment to a local variable, all references of that // variable have to be spilled. If it is aliased, all calls and // indirect accesses have to be spilled if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); for (unsigned level = 0; level < chkLevel; level++) { assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum)); assert(!lvaTable[lclNum].IsAddressExposed() || (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0); } } // If the access may be to global memory, all side effects have to be spilled. else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0); } } } #endif } //------------------------------------------------------------------------ // impAppendStmt: Append the given statement to the current block's tree list. // // // Arguments: // stmt - The statement to add. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo) { if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE)) { assert(chkLevel <= verCurrentState.esStackDepth); /* If the statement being appended has any side-effects, check the stack to see if anything needs to be spilled to preserve correct ordering. */ GenTree* expr = stmt->GetRootNode(); GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT; // Assignment to (unaliased) locals don't count as a side-effect as // we handle them specially using impSpillLclRefs(). Temp locals should // be fine too. 
if ((expr->gtOper == GT_ASG) && (expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && ((expr->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) == 0) && !gtHasLocalsWithAddrOp(expr->AsOp()->gtOp2)) { GenTreeFlags op2Flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT; assert(flags == (op2Flags | GTF_ASG)); flags = op2Flags; } if (flags != 0) { bool spillGlobEffects = false; if ((flags & GTF_CALL) != 0) { // If there is a call, we have to spill global refs spillGlobEffects = true; } else if (!expr->OperIs(GT_ASG)) { if ((flags & GTF_ASG) != 0) { // The expression is not an assignment node but it has an assignment side effect, it // must be an atomic op, HW intrinsic or some other kind of node that stores to memory. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } } else { GenTree* lhs = expr->gtGetOp1(); GenTree* rhs = expr->gtGetOp2(); if (((rhs->gtFlags | lhs->gtFlags) & GTF_ASG) != 0) { // Either side of the assignment node has an assignment side effect. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } else if ((lhs->gtFlags & GTF_GLOB_REF) != 0) { spillGlobEffects = true; } } impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt")); } else { impSpillSpecialSideEff(); } } impAppendStmtCheck(stmt, chkLevel); impAppendStmt(stmt); #ifdef FEATURE_SIMD impMarkContiguousSIMDFieldAssignments(stmt); #endif // Once we set the current offset as debug info in an appended tree, we are // ready to report the following offsets. Note that we need to compare // offsets here instead of debug info, since we do not set the "is call" // bit in impCurStmtDI. if (checkConsumedDebugInfo && (impLastStmt->GetDebugInfo().GetLocation().GetOffset() == impCurStmtDI.GetLocation().GetOffset())) { impCurStmtOffsSet(BAD_IL_OFFSET); } #ifdef DEBUG if (impLastILoffsStmt == nullptr) { impLastILoffsStmt = stmt; } if (verbose) { printf("\n\n"); gtDispStmt(stmt); } #endif } //------------------------------------------------------------------------ // impAppendStmt: Add the statement to the current stmts list. // // Arguments: // stmt - the statement to add. // inline void Compiler::impAppendStmt(Statement* stmt) { if (impStmtList == nullptr) { // The stmt is the first in the list. impStmtList = stmt; } else { // Append the expression statement to the existing list. impLastStmt->SetNextStmt(stmt); stmt->SetPrevStmt(impLastStmt); } impLastStmt = stmt; } //------------------------------------------------------------------------ // impExtractLastStmt: Extract the last statement from the current stmts list. // // Return Value: // The extracted statement. // // Notes: // It assumes that the stmt will be reinserted later. // Statement* Compiler::impExtractLastStmt() { assert(impLastStmt != nullptr); Statement* stmt = impLastStmt; impLastStmt = impLastStmt->GetPrevStmt(); if (impLastStmt == nullptr) { impStmtList = nullptr; } return stmt; } //------------------------------------------------------------------------- // impInsertStmtBefore: Insert the given "stmt" before "stmtBefore". // // Arguments: // stmt - a statement to insert; // stmtBefore - an insertion point to insert "stmt" before. 
// inline void Compiler::impInsertStmtBefore(Statement* stmt, Statement* stmtBefore) { assert(stmt != nullptr); assert(stmtBefore != nullptr); if (stmtBefore == impStmtList) { impStmtList = stmt; } else { Statement* stmtPrev = stmtBefore->GetPrevStmt(); stmt->SetPrevStmt(stmtPrev); stmtPrev->SetNextStmt(stmt); } stmt->SetNextStmt(stmtBefore); stmtBefore->SetPrevStmt(stmt); } //------------------------------------------------------------------------ // impAppendTree: Append the given expression tree to the current block's tree list. // // // Arguments: // tree - The tree that will be the root of the newly created statement. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // di - Debug information to associate with the statement. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // // Return value: // The newly created statement. // Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo) { assert(tree); /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impAppendStmt(stmt, chkLevel, checkConsumedDebugInfo); return stmt; } /***************************************************************************** * * Insert the given expression tree before "stmtBefore" */ void Compiler::impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore) { /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impInsertStmtBefore(stmt, stmtBefore); } /***************************************************************************** * * Append an assignment of the given value to a temp to the current tree list. * curLevel is the stack level for which the spill to the temp is being done. */ void Compiler::impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg = gtNewTempAssign(tmp, val); if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * same as above, but handle the valueclass case too */ void Compiler::impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structType, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg; assert(val->TypeGet() != TYP_STRUCT || structType != NO_CLASS_HANDLE); if (varTypeIsStruct(val) && (structType != NO_CLASS_HANDLE)) { assert(tmpNum < lvaCount); assert(structType != NO_CLASS_HANDLE); // if the method is non-verifiable the assert is not true // so at least ignore it in the case when verification is turned on // since any block that tries to use the temp would have failed verification. 
var_types varType = lvaTable[tmpNum].lvType; assert(varType == TYP_UNDEF || varTypeIsStruct(varType)); lvaSetStruct(tmpNum, structType, false); varType = lvaTable[tmpNum].lvType; // Now, set the type of the struct value. Note that lvaSetStruct may modify the type // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType) // that has been passed in for the value being assigned to the temp, in which case we // need to set 'val' to that same type. // Note also that if we always normalized the types of any node that might be a struct // type, this would not be necessary - but that requires additional JIT/EE interface // calls that may not actually be required - e.g. if we only access a field of a struct. GenTree* dst = gtNewLclvNode(tmpNum, varType); asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, di, block); } else { asg = gtNewTempAssign(tmpNum, val); } if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * * Pop the given number of values from the stack and return a list node with * their values. * The 'prefixTree' argument may optionally contain an argument * list that is prepended to the list returned from this function. * * The notion of prepended is a bit misleading in that the list is backwards * from the way I would expect: The first element popped is at the end of * the returned list, and prefixTree is 'before' that, meaning closer to * the end of the list. To get to prefixTree, you have to walk to the * end of the list. * * For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as * such we reverse its meaning such that returnValue has a reversed * prefixTree at the head of the list. */ GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs) { assert(sig == nullptr || count == sig->numArgs); CORINFO_CLASS_HANDLE structType; GenTreeCall::Use* argList; if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { argList = nullptr; } else { // ARG_ORDER_L2R argList = prefixArgs; } while (count--) { StackEntry se = impPopStack(); typeInfo ti = se.seTypeInfo; GenTree* temp = se.val; if (varTypeIsStruct(temp)) { // Morph trees that aren't already OBJs or MKREFANY to be OBJs assert(ti.IsType(TI_STRUCT)); structType = ti.GetClassHandleForValueClass(); bool forceNormalization = false; if (varTypeIsSIMD(temp)) { // We need to ensure that fgMorphArgs will use the correct struct handle to ensure proper // ABI handling of this argument. // Note that this can happen, for example, if we have a SIMD intrinsic that returns a SIMD type // with a different baseType than we've seen. // We also need to ensure an OBJ node if we have a FIELD node that might be transformed to LCL_FLD // or a plain GT_IND. // TODO-Cleanup: Consider whether we can eliminate all of these cases. 
if ((gtGetStructHandleIfPresent(temp) != structType) || temp->OperIs(GT_FIELD)) { forceNormalization = true; } } #ifdef DEBUG if (verbose) { printf("Calling impNormStructVal on:\n"); gtDispTree(temp); } #endif temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL, forceNormalization); #ifdef DEBUG if (verbose) { printf("resulting tree:\n"); gtDispTree(temp); } #endif } /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */ argList = gtPrependNewCallArg(temp, argList); } if (sig != nullptr) { if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggerred from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass); } CORINFO_ARG_LIST_HANDLE sigArgs = sig->args; GenTreeCall::Use* arg; for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--) { PREFIX_ASSUME(arg != nullptr); CORINFO_CLASS_HANDLE classHnd; CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd)); var_types jitSigType = JITtype2varType(corType); if (!impCheckImplicitArgumentCoercion(jitSigType, arg->GetNode()->TypeGet())) { BADCODE("the call argument has a type that can't be implicitly converted to the signature type"); } // insert implied casts (from float to double or double to float) if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT)) { arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE)); } else if ((jitSigType == TYP_FLOAT) && (arg->GetNode()->TypeGet() == TYP_DOUBLE)) { arg->SetNode(gtNewCastNode(TYP_FLOAT, arg->GetNode(), false, TYP_FLOAT)); } // insert any widening or narrowing casts for backwards compatibility arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType)); if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR && corType != CORINFO_TYPE_VAR) { CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs); if (argRealClass != nullptr) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggered from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass); } } const var_types nodeArgType = arg->GetNode()->TypeGet(); if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType)) { assert(!varTypeIsStruct(nodeArgType)); // Some ABI require precise size information for call arguments less than target pointer size, // for example arm64 OSX. Create a special node to keep this information until morph // consumes it into `fgArgInfo`. 
GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode()); arg->SetNode(putArgType); } sigArgs = info.compCompHnd->getArgNext(sigArgs); } } if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { // Prepend the prefixTree // Simple in-place reversal to place treeList // at the end of a reversed prefixTree while (prefixArgs != nullptr) { GenTreeCall::Use* next = prefixArgs->GetNext(); prefixArgs->SetNext(argList); argList = prefixArgs; prefixArgs = next; } } return argList; } static bool TypeIs(var_types type1, var_types type2) { return type1 == type2; } // Check if type1 matches any type from the list. template <typename... T> static bool TypeIs(var_types type1, var_types type2, T... rest) { return TypeIs(type1, type2) || TypeIs(type1, rest...); } //------------------------------------------------------------------------ // impCheckImplicitArgumentCoercion: check that the node's type is compatible with // the signature's type using ECMA implicit argument coercion table. // // Arguments: // sigType - the type in the call signature; // nodeType - the node type. // // Return Value: // true if they are compatible, false otherwise. // // Notes: // - it is currently allowing byref->long passing, should be fixed in VM; // - it can't check long -> native int case on 64-bit platforms, // so the behavior is different depending on the target bitness. // bool Compiler::impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const { if (sigType == nodeType) { return true; } if (TypeIs(sigType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT)) { if (TypeIs(nodeType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT, TYP_I_IMPL)) { return true; } } else if (TypeIs(sigType, TYP_ULONG, TYP_LONG)) { if (TypeIs(nodeType, TYP_LONG)) { return true; } } else if (TypeIs(sigType, TYP_FLOAT, TYP_DOUBLE)) { if (TypeIs(nodeType, TYP_FLOAT, TYP_DOUBLE)) { return true; } } else if (TypeIs(sigType, TYP_BYREF)) { if (TypeIs(nodeType, TYP_I_IMPL)) { return true; } // This condition tolerates such IL: // ; V00 this ref this class-hnd // ldarg.0 // call(byref) if (TypeIs(nodeType, TYP_REF)) { return true; } } else if (varTypeIsStruct(sigType)) { if (varTypeIsStruct(nodeType)) { return true; } } // This condition should not be under `else` because `TYP_I_IMPL` // intersects with `TYP_LONG` or `TYP_INT`. if (TypeIs(sigType, TYP_I_IMPL, TYP_U_IMPL)) { // Note that it allows `ldc.i8 1; call(nint)` on 64-bit platforms, // but we can't distinguish `nint` from `long` there. if (TypeIs(nodeType, TYP_I_IMPL, TYP_U_IMPL, TYP_INT, TYP_UINT)) { return true; } // It tolerates IL that ECMA does not allow but that is commonly used. // Example: // V02 loc1 struct <RTL_OSVERSIONINFOEX, 32> // ldloca.s 0x2 // call(native int) if (TypeIs(nodeType, TYP_BYREF)) { return true; } } return false; } /***************************************************************************** * * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.) * The first "skipReverseCount" items are not reversed. 
*/ GenTreeCall::Use* Compiler::impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount) { assert(skipReverseCount <= count); GenTreeCall::Use* list = impPopCallArgs(count, sig); // reverse the list if (list == nullptr || skipReverseCount == count) { return list; } GenTreeCall::Use* ptr = nullptr; // Initialized to the first node that needs to be reversed GenTreeCall::Use* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed if (skipReverseCount == 0) { ptr = list; } else { lastSkipNode = list; // Get to the first node that needs to be reversed for (unsigned i = 0; i < skipReverseCount - 1; i++) { lastSkipNode = lastSkipNode->GetNext(); } PREFIX_ASSUME(lastSkipNode != nullptr); ptr = lastSkipNode->GetNext(); } GenTreeCall::Use* reversedList = nullptr; do { GenTreeCall::Use* tmp = ptr->GetNext(); ptr->SetNext(reversedList); reversedList = ptr; ptr = tmp; } while (ptr != nullptr); if (skipReverseCount) { lastSkipNode->SetNext(reversedList); return list; } else { return reversedList; } } //------------------------------------------------------------------------ // impAssignStruct: Create a struct assignment // // Arguments: // dest - the destination of the assignment // src - the value to be assigned // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // ilOffset - il offset for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = nullptr */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = nullptr */ ) { assert(varTypeIsStruct(dest)); DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } while (dest->gtOper == GT_COMMA) { // Second thing is the struct. assert(varTypeIsStruct(dest->AsOp()->gtOp2)); // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree. if (pAfterStmt) { Statement* newStmt = gtNewStmt(dest->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(dest->AsOp()->gtOp1, curLevel, usedDI); // do the side effect } // set dest to the second thing dest = dest->AsOp()->gtOp2; } assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD || dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX); // Return a NOP if this is a self-assignment. if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR && src->AsLclVarCommon()->GetLclNum() == dest->AsLclVarCommon()->GetLclNum()) { return gtNewNothingNode(); } // TODO-1stClassStructs: Avoid creating an address if it is not needed, // or re-creating a Blk node if it is. GenTree* destAddr; if (dest->gtOper == GT_IND || dest->OperIsBlk()) { destAddr = dest->AsOp()->gtOp1; } else { destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest); } return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, usedDI, block)); } //------------------------------------------------------------------------ // impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'. 
// // Arguments: // destAddr - address of the destination of the assignment // src - source of the assignment // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // di - debug info for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* dest = nullptr; GenTreeFlags destFlags = GTF_EMPTY; DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } #ifdef DEBUG #ifdef FEATURE_HW_INTRINSICS if (src->OperIs(GT_HWINTRINSIC)) { const GenTreeHWIntrinsic* intrinsic = src->AsHWIntrinsic(); if (HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())) { assert(src->TypeGet() == TYP_STRUCT); } else { assert(varTypeIsSIMD(src)); } } else #endif // FEATURE_HW_INTRINSICS { assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR, GT_COMMA) || ((src->TypeGet() != TYP_STRUCT) && src->OperIsSIMD())); } #endif // DEBUG var_types asgType = src->TypeGet(); if (src->gtOper == GT_CALL) { GenTreeCall* srcCall = src->AsCall(); if (srcCall->TreatAsHasRetBufArg(this)) { // Case of call returning a struct via hidden retbuf arg CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_ARM) // Unmanaged instance methods on Windows or Unix X86 need the retbuf arg after the first (this) parameter if ((TargetOS::IsWindows || compUnixX86Abi()) && srcCall->IsUnmanaged()) { if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv())) { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the second-to-last node // so it will be pushed on to the stack after the user args but before the native this arg // as required by the native ABI. GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else if (srcCall->GetUnmanagedCallConv() == CorInfoCallConvExtension::Thiscall) { // For thiscall, the "this" parameter is not included in the argument list reversal, // so we need to put the return buffer as the last parameter. for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } else if (lastArg->GetNext() == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, lastArg); } else { assert(lastArg != nullptr && lastArg->GetNext() != nullptr); GenTreeCall::Use* secondLastArg = lastArg; lastArg = lastArg->GetNext(); for (; lastArg->GetNext() != nullptr; secondLastArg = lastArg, lastArg = lastArg->GetNext()) ; assert(secondLastArg->GetNext() != nullptr); gtInsertNewCallArgAfter(destAddr, secondLastArg); } #else GenTreeCall::Use* thisArg = gtInsertNewCallArgAfter(destAddr, srcCall->gtCallArgs); #endif } else { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the last node so it will be pushed on to the stack last // as required by the native ABI. 
GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else { for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } #else // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); #endif } } else #endif // !defined(TARGET_ARM) { // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } // now returns void, not a struct src->gtType = TYP_VOID; // return the morphed call node return src; } else { // Case of call returning a struct in one or more registers. var_types returnType = (var_types)srcCall->gtReturnType; // First we try to change this to "LclVar/LclFld = call" // if ((destAddr->gtOper == GT_ADDR) && (destAddr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)) { // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD. // That is, the IR will be of the form lclVar = call for multi-reg return // GenTreeLclVar* lcl = destAddr->AsOp()->gtOp1->AsLclVar(); unsigned lclNum = lcl->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (src->AsCall()->HasMultiRegRetVal()) { // Mark the struct LclVar as used in a MultiReg return context // which currently makes it non promotable. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; } dest = lcl; #if defined(TARGET_ARM) // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case, // but that method has not been updadted to include ARM. impMarkLclDstNotPromotable(lclNum, src, structHnd); lcl->gtFlags |= GTF_DONT_CSE; #elif defined(UNIX_AMD64_ABI) // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs. assert(!src->AsCall()->IsVarargs() && "varargs not allowed for System V OSs."); // Make the struct non promotable. The eightbytes could contain multiple fields. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. // TODO-Cleanup: Why is this needed here? This seems that it will set this even for // non-multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; #endif } else // we don't have a GT_ADDR of a GT_LCL_VAR { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier. asgType = returnType; destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->gtOper == GT_RET_EXPR) { GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate->AsCall(); noway_assert(call->gtOper == GT_CALL); if (call->HasRetBufArg()) { // insert the return value buffer into the argument list as first byref parameter call->gtCallArgs = gtPrependNewCallArg(destAddr, call->gtCallArgs); // now returns void, not a struct src->gtType = TYP_VOID; call->gtType = TYP_VOID; // We already have appended the write to 'dest' GT_CALL's args // So now we just return an empty node (pruning the GT_RET_EXPR) return src; } else { // Case of inline method returning a struct in one or more registers. // We won't need a return buffer asgType = src->gtType; if ((destAddr->gtOper != GT_ADDR) || (destAddr->AsOp()->gtOp1->gtOper != GT_LCL_VAR)) { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier. 
destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->OperIsBlk()) { asgType = impNormStructType(structHnd); if (src->gtOper == GT_OBJ) { assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd); } } else if (src->gtOper == GT_INDEX) { asgType = impNormStructType(structHnd); assert(src->AsIndex()->gtStructElemClass == structHnd); } else if (src->gtOper == GT_MKREFANY) { // Since we are assigning the result of a GT_MKREFANY, // "destAddr" must point to a refany. GenTree* destAddrClone; destAddr = impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment")); assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0); assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF); fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr); GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()); GenTree* typeSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset)); // append the assign of the pointer value GenTree* asg = gtNewAssignNode(ptrSlot, src->AsOp()->gtOp1); if (pAfterStmt) { Statement* newStmt = gtNewStmt(asg, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(asg, curLevel, usedDI); } // return the assign of the type value, to be appended return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2); } else if (src->gtOper == GT_COMMA) { // The second thing is the struct or its address. assert(varTypeIsStruct(src->AsOp()->gtOp2) || src->AsOp()->gtOp2->gtType == TYP_BYREF); if (pAfterStmt) { // Insert op1 after '*pAfterStmt' Statement* newStmt = gtNewStmt(src->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else if (impLastStmt != nullptr) { // Do the side-effect as a separate statement. impAppendTree(src->AsOp()->gtOp1, curLevel, usedDI); } else { // In this case we have neither been given a statement to insert after, nor are we // in the importer where we can append the side effect. // Instead, we're going to sink the assignment below the COMMA. src->AsOp()->gtOp2 = impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); return src; } // Evaluate the second thing using recursion. return impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); } else if (src->IsLocal()) { asgType = src->TypeGet(); } else if (asgType == TYP_STRUCT) { // It should already have the appropriate type. assert(asgType == impNormStructType(structHnd)); } if ((dest == nullptr) && (destAddr->OperGet() == GT_ADDR)) { GenTree* destNode = destAddr->gtGetOp1(); // If the actual destination is a local, a GT_INDEX or a block node, or is a node that // will be morphed, don't insert an OBJ(ADDR) if it already has the right type. if (destNode->OperIs(GT_LCL_VAR, GT_INDEX) || destNode->OperIsBlk()) { var_types destType = destNode->TypeGet(); // If one or both types are TYP_STRUCT (one may not yet be normalized), they are compatible // iff their handles are the same. // Otherwise, they are compatible if their types are the same. bool typesAreCompatible = ((destType == TYP_STRUCT) || (asgType == TYP_STRUCT)) ? 
((gtGetStructHandleIfPresent(destNode) == structHnd) && varTypeIsStruct(asgType)) : (destType == asgType); if (typesAreCompatible) { dest = destNode; if (destType != TYP_STRUCT) { // Use a normalized type if available. We know from above that they're equivalent. asgType = destType; } } } } if (dest == nullptr) { if (asgType == TYP_STRUCT) { dest = gtNewObjNode(structHnd, destAddr); gtSetObjGcInfo(dest->AsObj()); // Although an obj as a call argument was always assumed to be a globRef // (which is itself overly conservative), that is not true of the operands // of a block assignment. dest->gtFlags &= ~GTF_GLOB_REF; dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF); } else { dest = gtNewOperNode(GT_IND, asgType, destAddr); } } if (dest->OperIs(GT_LCL_VAR) && (src->IsMultiRegNode() || (src->OperIs(GT_RET_EXPR) && src->AsRetExpr()->gtInlineCandidate->AsCall()->HasMultiRegRetVal()))) { if (lvaEnregMultiRegVars && varTypeIsStruct(dest)) { dest->AsLclVar()->SetMultiReg(); } if (src->OperIs(GT_CALL)) { lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true; } } dest->gtFlags |= destFlags; destFlags = dest->gtFlags; // return an assignment node, to be appended GenTree* asgNode = gtNewAssignNode(dest, src); gtBlockOpInit(asgNode, dest, src, false); // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs // of assignments. if ((destFlags & GTF_DONT_CSE) == 0) { dest->gtFlags &= ~(GTF_DONT_CSE); } return asgNode; } /***************************************************************************** Given a struct value, and the class handle for that structure, return the expression for the address for that structure value. willDeref - does the caller guarantee to dereference the pointer. */ GenTree* Compiler::impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref) { assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd)); var_types type = structVal->TypeGet(); genTreeOps oper = structVal->gtOper; if (oper == GT_OBJ && willDeref) { assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd); return (structVal->AsObj()->Addr()); } else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY || structVal->OperIsSimdOrHWintrinsic()) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The 'return value' is now the temp itself type = genActualType(lvaTable[tmpNum].TypeGet()); GenTree* temp = gtNewLclvNode(tmpNum, type); temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp); return temp; } else if (oper == GT_COMMA) { assert(structVal->AsOp()->gtOp2->gtType == type); // Second thing is the struct Statement* oldLastStmt = impLastStmt; structVal->AsOp()->gtOp2 = impGetStructAddr(structVal->AsOp()->gtOp2, structHnd, curLevel, willDeref); structVal->gtType = TYP_BYREF; if (oldLastStmt != impLastStmt) { // Some temp assignment statement was placed on the statement list // for Op2, but that would be out of order with op1, so we need to // spill op1 onto the statement list after whatever was last // before we recursed on Op2 (i.e. before whatever Op2 appended). Statement* beforeStmt; if (oldLastStmt == nullptr) { // The op1 stmt should be the first in the list. beforeStmt = impStmtList; } else { // Insert after the oldLastStmt before the first inserted for op2. 
beforeStmt = oldLastStmt->GetNextStmt(); } impInsertTreeBefore(structVal->AsOp()->gtOp1, impCurStmtDI, beforeStmt); structVal->AsOp()->gtOp1 = gtNewNothingNode(); } return (structVal); } return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } //------------------------------------------------------------------------ // impNormStructType: Normalize the type of a (known to be) struct class handle. // // Arguments: // structHnd - The class handle for the struct type of interest. // pSimdBaseJitType - (optional, default nullptr) - if non-null, and the struct is a SIMD // type, set to the SIMD base JIT type // // Return Value: // The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*). // It may also modify the compFloatingPointUsed flag if the type is a SIMD type. // // Notes: // Normalizing the type involves examining the struct type to determine if it should // be modified to one that is handled specially by the JIT, possibly being a candidate // for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known // call structSizeMightRepresentSIMDType to determine if this api needs to be called. var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* pSimdBaseJitType) { assert(structHnd != NO_CLASS_HANDLE); var_types structType = TYP_STRUCT; #ifdef FEATURE_SIMD if (supportSIMDTypes()) { const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd); // Don't bother if the struct contains GC references of byrefs, it can't be a SIMD type. if ((structFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) == 0) { unsigned originalSize = info.compCompHnd->getClassSize(structHnd); if (structSizeMightRepresentSIMDType(originalSize)) { unsigned int sizeBytes; CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structHnd, &sizeBytes); if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(sizeBytes == originalSize); structType = getSIMDTypeForSize(sizeBytes); if (pSimdBaseJitType != nullptr) { *pSimdBaseJitType = simdBaseJitType; } // Also indicate that we use floating point registers. compFloatingPointUsed = true; } } } } #endif // FEATURE_SIMD return structType; } //------------------------------------------------------------------------ // Compiler::impNormStructVal: Normalize a struct value // // Arguments: // structVal - the node we are going to normalize // structHnd - the class handle for the node // curLevel - the current stack level // forceNormalization - Force the creation of an OBJ node (default is false). // // Notes: // Given struct value 'structVal', make sure it is 'canonical', that is // it is either: // - a known struct type (non-TYP_STRUCT, e.g. TYP_SIMD8) // - an OBJ or a MKREFANY node, or // - a node (e.g. GT_INDEX) that will be morphed. // If the node is a CALL or RET_EXPR, a copy will be made to a new temp. // GenTree* Compiler::impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization /*=false*/) { assert(forceNormalization || varTypeIsStruct(structVal)); assert(structHnd != NO_CLASS_HANDLE); var_types structType = structVal->TypeGet(); bool makeTemp = false; if (structType == TYP_STRUCT) { structType = impNormStructType(structHnd); } bool alreadyNormalized = false; GenTreeLclVarCommon* structLcl = nullptr; genTreeOps oper = structVal->OperGet(); switch (oper) { // GT_RETURN and GT_MKREFANY don't capture the handle. 
case GT_RETURN: break; case GT_MKREFANY: alreadyNormalized = true; break; case GT_CALL: structVal->AsCall()->gtRetClsHnd = structHnd; makeTemp = true; break; case GT_RET_EXPR: structVal->AsRetExpr()->gtRetClsHnd = structHnd; makeTemp = true; break; case GT_ARGPLACE: structVal->AsArgPlace()->gtArgPlaceClsHnd = structHnd; break; case GT_INDEX: // This will be transformed to an OBJ later. alreadyNormalized = true; structVal->AsIndex()->gtStructElemClass = structHnd; structVal->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(structHnd); break; case GT_FIELD: // Wrap it in a GT_OBJ, if needed. structVal->gtType = structType; if ((structType == TYP_STRUCT) || forceNormalization) { structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } break; case GT_LCL_VAR: case GT_LCL_FLD: structLcl = structVal->AsLclVarCommon(); // Wrap it in a GT_OBJ. structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); FALLTHROUGH; case GT_OBJ: case GT_BLK: case GT_ASG: // These should already have the appropriate type. assert(structVal->gtType == structType); alreadyNormalized = true; break; case GT_IND: assert(structVal->gtType == structType); structVal = gtNewObjNode(structHnd, structVal->gtGetOp1()); alreadyNormalized = true; break; #ifdef FEATURE_SIMD case GT_SIMD: assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType)); break; #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: assert(structVal->gtType == structType); assert(varTypeIsSIMD(structVal) || HWIntrinsicInfo::IsMultiReg(structVal->AsHWIntrinsic()->GetHWIntrinsicId())); break; #endif case GT_COMMA: { // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node. GenTree* blockNode = structVal->AsOp()->gtOp2; assert(blockNode->gtType == structType); // Is this GT_COMMA(op1, GT_COMMA())? GenTree* parent = structVal; if (blockNode->OperGet() == GT_COMMA) { // Find the last node in the comma chain. do { assert(blockNode->gtType == structType); parent = blockNode; blockNode = blockNode->AsOp()->gtOp2; } while (blockNode->OperGet() == GT_COMMA); } if (blockNode->OperGet() == GT_FIELD) { // If we have a GT_FIELD then wrap it in a GT_OBJ. blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode)); } #ifdef FEATURE_SIMD if (blockNode->OperIsSimdOrHWintrinsic()) { parent->AsOp()->gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization); alreadyNormalized = true; } else #endif { noway_assert(blockNode->OperIsBlk()); // Sink the GT_COMMA below the blockNode addr. // That is GT_COMMA(op1, op2=blockNode) is tranformed into // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)). // // In case of a chained GT_COMMA case, we sink the last // GT_COMMA below the blockNode addr. 
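                // Illustrative sketch of the resulting shape (assuming op2 is an OBJ):
                //
                //   before:  COMMA(op1, OBJ(addr))
                //   after:   OBJ(COMMA(op1, addr))
                //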
GenTree* blockNodeAddr = blockNode->AsOp()->gtOp1; assert(blockNodeAddr->gtType == TYP_BYREF); GenTree* commaNode = parent; commaNode->gtType = TYP_BYREF; commaNode->AsOp()->gtOp2 = blockNodeAddr; blockNode->AsOp()->gtOp1 = commaNode; if (parent == structVal) { structVal = blockNode; } alreadyNormalized = true; } } break; default: noway_assert(!"Unexpected node in impNormStructVal()"); break; } structVal->gtType = structType; if (!alreadyNormalized || forceNormalization) { if (makeTemp) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The structVal is now the temp itself structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon(); structVal = structLcl; } if ((forceNormalization || (structType == TYP_STRUCT)) && !structVal->OperIsBlk()) { // Wrap it in a GT_OBJ structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } } if (structLcl != nullptr) { // A OBJ on a ADDR(LCL_VAR) can never raise an exception // so we don't set GTF_EXCEPT here. if (!lvaIsImplicitByRefLocal(structLcl->GetLclNum())) { structVal->gtFlags &= ~GTF_GLOB_REF; } } else if (structVal->OperIsBlk()) { // In general a OBJ is an indirection and could raise an exception. structVal->gtFlags |= GTF_EXCEPT; } return structVal; } /******************************************************************************/ // Given a type token, generate code that will evaluate to the correct // handle representation of that token (type handle, field handle, or method handle) // // For most cases, the handle is determined at compile-time, and the code // generated is simply an embedded handle. // // Run-time lookup is required if the enclosing method is shared between instantiations // and the token refers to formal type parameters whose instantiation is not known // at compile-time. // GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup /* = NULL */, bool mustRestoreHandle /* = false */, bool importParent /* = false */) { assert(!fgGlobalMorph); CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo); if (pRuntimeLookup) { *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup; } if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup) { switch (embedInfo.handleType) { case CORINFO_HANDLETYPE_CLASS: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle); break; case CORINFO_HANDLETYPE_METHOD: info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle); break; case CORINFO_HANDLETYPE_FIELD: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun( info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle)); break; default: break; } } // Generate the full lookup tree. May be null if we're abandoning an inline attempt. GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token), embedInfo.compileTimeHandle); // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node. 
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG size_t handleToTrack; if (handleFlags == GTF_ICON_TOKEN_HDL) { handleToTrack = 0; } else { handleToTrack = (size_t)compileTimeHandle; } if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = handleToTrack; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = handleToTrack; } #endif return addr; } if (pLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case assert(compIsForInlining()); compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP); return nullptr; } // Need to use dictionary-based access which depends on the typeContext // which is only available at runtime, not at compile-time. return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle); } #ifdef FEATURE_READYTORUN GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE); if (pLookup->accessType == IAT_VALUE) { handle = pLookup->handle; } else if (pLookup->accessType == IAT_PVALUE) { pIndirection = pLookup->addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG assert((handleFlags == GTF_ICON_CLASS_HDL) || (handleFlags == GTF_ICON_METHOD_HDL)); if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } #endif // DEBUG return addr; } //------------------------------------------------------------------------ // impIsCastHelperEligibleForClassProbe: Checks whether a tree is a cast helper eligible to // to be profiled and then optimized with PGO data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper eligible to be profiled // bool Compiler::impIsCastHelperEligibleForClassProbe(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } 
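// For reference (illustrative, not exhaustive): C# type tests and casts such as
// "obj is IFoo" or "(Foo)obj" typically lower to the ISINSTANCEOF*/CHKCAST* helpers
// checked above, which is what makes those call sites candidates for class profiling.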
//------------------------------------------------------------------------ // impIsCastHelperMayHaveProfileData: Checks whether a tree is a cast helper that might // have profile data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper with potential profile data // bool Compiler::impIsCastHelperMayHaveProfileData(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } GenTreeCall* Compiler::impReadyToRunHelperToTree( CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args /* = nullptr */, CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */) { CORINFO_CONST_LOOKUP lookup; if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup)) { return nullptr; } GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args); op1->setEntryPoint(lookup); return op1; } #endif GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* op1 = nullptr; switch (pCallInfo->kind) { case CORINFO_CALL: op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod); #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1->AsFptrVal()->gtEntryPoint = pCallInfo->codePointerLookup.constLookup; } #endif break; case CORINFO_CALL_CODE_POINTER: op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod); break; default: noway_assert(!"unknown call kind"); break; } return op1; } //------------------------------------------------------------------------ // getRuntimeContextTree: find pointer to context for runtime lookup. // // Arguments: // kind - lookup kind. // // Return Value: // Return GenTree pointer to generic shared context. // // Notes: // Reports about generic context using. GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind) { GenTree* ctxTree = nullptr; // Collectible types requires that for shared generic code, if we use the generic context parameter // that we report it. (This is a conservative approach, we could detect some cases particularly when the // context parameter is this that we don't need the eager reporting logic.) lvaGenericsContextInUse = true; Compiler* pRoot = impInlineRoot(); if (kind == CORINFO_LOOKUP_THISOBJ) { // this Object ctxTree = gtNewLclvNode(pRoot->info.compThisArg, TYP_REF); ctxTree->gtFlags |= GTF_VAR_CONTEXT; // context is the method table pointer of the this object ctxTree = gtNewMethodTableLookup(ctxTree); } else { assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM); // Exact method descriptor as passed in ctxTree = gtNewLclvNode(pRoot->info.compTypeCtxtArg, TYP_I_IMPL); ctxTree->gtFlags |= GTF_VAR_CONTEXT; } return ctxTree; } /*****************************************************************************/ /* Import a dictionary lookup to access a handle in code shared between generic instantiations. The lookup depends on the typeContext which is only available at runtime, and not at compile-time. 
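   (A typical client is shared generic code that needs the exact handle for one of its
   type parameters at runtime, e.g. to materialize typeof(T); the handle cannot be baked
   in at compile time because the same native code services many instantiations.)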
pLookup->token1 and pLookup->token2 specify the handle that is needed. The cases are: 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the instantiation-specific handle, and the tokens to lookup the handle. 2. pLookup->indirections != CORINFO_USEHELPER : 2a. pLookup->testForNull == false : Dereference the instantiation-specific handle to get the handle. 2b. pLookup->testForNull == true : Dereference the instantiation-specific handle. If it is non-NULL, it is the handle required. Else, call a helper to lookup the handle. */ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle) { GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind); CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup; // It's available only via the run-time helper function if (pRuntimeLookup->indirections == CORINFO_USEHELPER) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pLookup->lookupKind); } #endif return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, ctxTree, compileTimeHandle); } // Slot pointer GenTree* slotPtrTree = ctxTree; if (pRuntimeLookup->testForNull) { slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup slot")); } GenTree* indOffTree = nullptr; GenTree* lastIndOfTree = nullptr; // Applied repeated indirections for (WORD i = 0; i < pRuntimeLookup->indirections; i++) { if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } // The last indirection could be subject to a size check (dynamic dictionary expansion) bool isLastIndirectionWithSizeCheck = ((i == pRuntimeLookup->indirections - 1) && (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)); if (i != 0) { slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!isLastIndirectionWithSizeCheck) { slotPtrTree->gtFlags |= GTF_IND_INVARIANT; } } if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree); } if (pRuntimeLookup->offsets[i] != 0) { if (isLastIndirectionWithSizeCheck) { lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL)); } } // No null test required if (!pRuntimeLookup->testForNull) { if (pRuntimeLookup->indirections == 0) { return slotPtrTree; } slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!pRuntimeLookup->testForFixup) { return slotPtrTree; } impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0")); unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test")); impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtDI); GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); // downcast the pointer to a TYP_INT on 64-bit targets slot = impImplicitIorI4Cast(slot, TYP_INT); // 
Use a GT_AND to check for the lowest bit and indirect if it is set GenTree* test = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1)); GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0)); // slot = GT_IND(slot - 1) slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* add = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL)); GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add); indir->gtFlags |= GTF_IND_NONFAULTING; indir->gtFlags |= GTF_IND_INVARIANT; slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* asg = gtNewAssignNode(slot, indir); GenTreeColon* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg); GenTreeQmark* qmark = gtNewQmarkNode(TYP_VOID, relop, colon); impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); return gtNewLclvNode(slotLclNum, TYP_I_IMPL); } assert(pRuntimeLookup->indirections != 0); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1")); // Extract the handle GenTree* handleForNullCheck = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); handleForNullCheck->gtFlags |= GTF_IND_NONFAULTING; // Call the helper // - Setup argNode with the pointer to the signature returned by the lookup GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle); GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode); GenTreeCall* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs); // Check for null and possibly call helper GenTree* nullCheck = gtNewOperNode(GT_NE, TYP_INT, handleForNullCheck, gtNewIconNode(0, TYP_I_IMPL)); GenTree* handleForResult = gtCloneExpr(handleForNullCheck); GenTree* result = nullptr; if (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK) { // Dynamic dictionary expansion support assert((lastIndOfTree != nullptr) && (pRuntimeLookup->indirections > 0)); // sizeValue = dictionary[pRuntimeLookup->sizeOffset] GenTreeIntCon* sizeOffset = gtNewIconNode(pRuntimeLookup->sizeOffset, TYP_I_IMPL); GenTree* sizeValueOffset = gtNewOperNode(GT_ADD, TYP_I_IMPL, lastIndOfTree, sizeOffset); GenTree* sizeValue = gtNewOperNode(GT_IND, TYP_I_IMPL, sizeValueOffset); sizeValue->gtFlags |= GTF_IND_NONFAULTING; // sizeCheck fails if sizeValue < pRuntimeLookup->offsets[i] GenTree* offsetValue = gtNewIconNode(pRuntimeLookup->offsets[pRuntimeLookup->indirections - 1], TYP_I_IMPL); GenTree* sizeCheck = gtNewOperNode(GT_LE, TYP_INT, sizeValue, offsetValue); // revert null check condition. nullCheck->ChangeOperUnchecked(GT_EQ); // ((sizeCheck fails || nullCheck fails))) ? (helperCall : handle). // Add checks and the handle as call arguments, indirect call transformer will handle this. helperCall->gtCallArgs = gtPrependNewCallArg(handleForResult, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(sizeCheck, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(nullCheck, helperCall->gtCallArgs); result = helperCall; addExpRuntimeLookupCandidate(helperCall); } else { GenTreeColon* colonNullCheck = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, handleForResult, helperCall); result = gtNewQmarkNode(TYP_I_IMPL, nullCheck, colonNullCheck); } unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree")); impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE); return gtNewLclvNode(tmp, TYP_I_IMPL); } /****************************************************************************** * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp. 
* If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum, * else, grab a new temp. * For structs (which can be pushed on the stack using obj, etc), * special handling is needed */ struct RecursiveGuard { public: RecursiveGuard() { m_pAddress = nullptr; } ~RecursiveGuard() { if (m_pAddress) { *m_pAddress = false; } } void Init(bool* pAddress, bool bInitialize) { assert(pAddress && *pAddress == false && "Recursive guard violation"); m_pAddress = pAddress; if (bInitialize) { *m_pAddress = true; } } protected: bool* m_pAddress; }; bool Compiler::impSpillStackEntry(unsigned level, unsigned tnum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ) { #ifdef DEBUG RecursiveGuard guard; guard.Init(&impNestedStackSpill, bAssertOnRecursion); #endif GenTree* tree = verCurrentState.esStack[level].val; /* Allocate a temp if we haven't been asked to use a particular one */ if (tnum != BAD_VAR_NUM && (tnum >= lvaCount)) { return false; } bool isNewTemp = false; if (tnum == BAD_VAR_NUM) { tnum = lvaGrabTemp(true DEBUGARG(reason)); isNewTemp = true; } /* Assign the spilled entry to the temp */ impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level); // If temp is newly introduced and a ref type, grab what type info we can. if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF)) { assert(lvaTable[tnum].lvSingleDef == 0); lvaTable[tnum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tnum); CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle(); lvaSetClass(tnum, tree, stkHnd); // If we're assigning a GT_RET_EXPR, note the temp over on the call, // so the inliner can use it in case it needs a return spill temp. if (tree->OperGet() == GT_RET_EXPR) { JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum); GenTree* call = tree->AsRetExpr()->gtInlineCandidate; InlineCandidateInfo* ici = call->AsCall()->gtInlineCandidateInfo; ici->preexistingSpillTemp = tnum; } } // The tree type may be modified by impAssignTempGen, so use the type of the lclVar. var_types type = genActualType(lvaTable[tnum].TypeGet()); GenTree* temp = gtNewLclvNode(tnum, type); verCurrentState.esStack[level].val = temp; return true; } /***************************************************************************** * * Ensure that the stack has only spilled values */ void Compiler::impSpillStackEnsure(bool spillLeaves) { assert(!spillLeaves || opts.compDbgCode); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (!spillLeaves && tree->OperIsLeaf()) { continue; } // Temps introduced by the importer itself don't need to be spilled bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->AsLclVarCommon()->GetLclNum() >= info.compLocalsCount); if (isTempLcl) { continue; } impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure")); } } void Compiler::impSpillEvalStack() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack")); } } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and append the assignments to the statement list. * On return the stack is guaranteed to be empty. 
*/ inline void Compiler::impEvalSideEffects() { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects")); verCurrentState.esStackDepth = 0; } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and replace them on the stack with refs to their temps. * [0..chkLevel) is the portion of the stack which will be checked and spilled. */ inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)) { assert(chkLevel != (unsigned)CHECK_SPILL_NONE); /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */ impSpillSpecialSideEff(); if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } assert(chkLevel <= verCurrentState.esStackDepth); GenTreeFlags spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT; for (unsigned i = 0; i < chkLevel; i++) { GenTree* tree = verCurrentState.esStack[i].val; if ((tree->gtFlags & spillFlags) != 0 || (spillGlobEffects && // Only consider the following when spillGlobEffects == true !impIsAddressInLocal(tree) && // No need to spill the GT_ADDR node on a local. gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or // lvAddrTaken flag. { impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason)); } } } /***************************************************************************** * * If the stack contains any trees with special side effects in them, assign * those trees to temps and replace them on the stack with refs to their temps. */ inline void Compiler::impSpillSpecialSideEff() { // Only exception objects need to be carefully handled if (!compCurBB->bbCatchTyp) { return; } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; // Make sure if we have an exception object in the sub tree we spill ourselves. if (gtHasCatchArg(tree)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff")); } } } /***************************************************************************** * * Spill all stack references to value classes (TYP_STRUCT nodes) */ void Compiler::impSpillValueClasses() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT) { // Tree walk was aborted, which means that we found a // value class on the stack. Need to spill that // stack entry. impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses")); } } } /***************************************************************************** * * Callback that checks if a tree node is TYP_STRUCT */ Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data) { fgWalkResult walkResult = WALK_CONTINUE; if ((*pTree)->gtType == TYP_STRUCT) { // Abort the walk and indicate that we found a value class walkResult = WALK_ABORT; } return walkResult; } /***************************************************************************** * * If the stack contains any trees with references to local #lclNum, assign * those trees to temps and replace their place on the stack with refs to * their temps. 
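 * (Typically this is called just before importing a store to the local, so that any
 * trees still on the stack observe the value the local had before the store.)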
*/ void Compiler::impSpillLclRefs(ssize_t lclNum) { /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */ impSpillSpecialSideEff(); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; /* If the tree may throw an exception, and the block has a handler, then we need to spill assignments to the local if the local is live on entry to the handler. Just spill 'em all without considering the liveness */ bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT)); /* Skip the tree if it doesn't have an affected reference, unless xcptnCaught */ if (xcptnCaught || gtHasRef(tree, lclNum)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs")); } } } /***************************************************************************** * * Push catch arg onto the stack. * If there are jumps to the beginning of the handler, insert basic block * and spill catch arg to a temp. Update the handler block if necessary. * * Returns the basic block of the actual handler. */ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter) { // Do not inject the basic block twice on reimport. This should be // hit only under JIT stress. See if the block is the one we injected. // Note that EH canonicalization can inject internal blocks here. We might // be able to re-use such a block (but we don't, right now). if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) == (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) { Statement* stmt = hndBlk->firstStmt(); if (stmt != nullptr) { GenTree* tree = stmt->GetRootNode(); assert(tree != nullptr); if ((tree->gtOper == GT_ASG) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && (tree->AsOp()->gtOp2->gtOper == GT_CATCH_ARG)) { tree = gtNewLclvNode(tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(), TYP_REF); impPushOnStack(tree, typeInfo(TI_REF, clsHnd)); return hndBlk->bbNext; } } // If we get here, it must have been some other kind of internal block. It's possible that // someone prepended something to our injected block, but that's unlikely. } /* Push the exception address value on the stack */ GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF); /* Mark the node as having a side-effect - i.e. cannot be * moved around since it is tied to a fixed location (EAX) */ arg->gtFlags |= GTF_ORDER_SIDEEFF; #if defined(JIT32_GCENCODER) const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5); #else const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5); #endif // defined(JIT32_GCENCODER) /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */ if (hndBlk->bbRefs > 1 || forceInsertNewBlock) { if (hndBlk->bbRefs == 1) { hndBlk->bbRefs++; } /* Create extra basic block for the spill */ BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true); newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE; newBlk->inheritWeight(hndBlk); newBlk->bbCodeOffs = hndBlk->bbCodeOffs; /* Account for the new link we are about to create */ hndBlk->bbRefs++; // Spill into a temp. 
unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg")); lvaTable[tempNum].lvType = TYP_REF; GenTree* argAsg = gtNewTempAssign(tempNum, arg); arg = gtNewLclvNode(tempNum, TYP_REF); hndBlk->bbStkTempsIn = tempNum; Statement* argStmt; if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { // Report the debug info. impImportBlockCode won't treat the actual handler as exception block and thus // won't do it for us. // TODO-DEBUGINFO: Previous code always set stack as non-empty // here. Can we not just use impCurStmtOffsSet? Are we out of sync // here with the stack? impCurStmtDI = DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, false, false)); argStmt = gtNewStmt(argAsg, impCurStmtDI); } else { argStmt = gtNewStmt(argAsg); } fgInsertStmtAtEnd(newBlk, argStmt); } impPushOnStack(arg, typeInfo(TI_REF, clsHnd)); return hndBlk; } /***************************************************************************** * * Given a tree, clone it. *pClone is set to the cloned tree. * Returns the original tree if the cloning was easy, * else returns the temp to which the tree had to be spilled to. * If the tree has side-effects, it will be spilled to a temp. */ GenTree* Compiler::impCloneExpr(GenTree* tree, GenTree** pClone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)) { if (!(tree->gtFlags & GTF_GLOB_EFFECT)) { GenTree* clone = gtClone(tree, true); if (clone) { *pClone = clone; return tree; } } /* Store the operand in a temp and return the temp */ unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which // return a struct type. It also may modify the struct type to a more // specialized type (e.g. a SIMD type). So we will get the type from // the lclVar AFTER calling impAssignTempGen(). impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtDI); var_types type = genActualType(lvaTable[temp].TypeGet()); *pClone = gtNewLclvNode(temp, type); return gtNewLclvNode(temp, type); } //------------------------------------------------------------------------ // impCreateDIWithCurrentStackInfo: Create a DebugInfo instance with the // specified IL offset and 'is call' bit, using the current stack to determine // whether to set the 'stack empty' bit. // // Arguments: // offs - the IL offset for the DebugInfo // isCall - whether the created DebugInfo should have the IsCall bit set // // Return Value: // The DebugInfo instance. // DebugInfo Compiler::impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall) { assert(offs != BAD_IL_OFFSET); bool isStackEmpty = verCurrentState.esStackDepth <= 0; return DebugInfo(compInlineContext, ILLocation(offs, isStackEmpty, isCall)); } //------------------------------------------------------------------------ // impCurStmtOffsSet: Set the "current debug info" to attach to statements that // we are generating next. // // Arguments: // offs - the IL offset // // Remarks: // This function will be called in the main IL processing loop when it is // determined that we have reached a location in the IL stream for which we // want to report debug information. This is the main way we determine which // statements to report debug info for to the EE: for other statements, they // will have no debug information attached. 
// inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs) { if (offs == BAD_IL_OFFSET) { impCurStmtDI = DebugInfo(compInlineContext, ILLocation()); } else { impCurStmtDI = impCreateDIWithCurrentStackInfo(offs, false); } } //------------------------------------------------------------------------ // impCanSpillNow: check is it possible to spill all values from eeStack to local variables. // // Arguments: // prevOpcode - last importer opcode // // Return Value: // true if it is legal, false if it could be a sequence that we do not want to divide. bool Compiler::impCanSpillNow(OPCODE prevOpcode) { // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence. // Avoid breaking up to guarantee that impInitializeArrayIntrinsic can succeed. return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ); } /***************************************************************************** * * Remember the instr offset for the statements * * When we do impAppendTree(tree), we can't set stmt->SetLastILOffset(impCurOpcOffs), * if the append was done because of a partial stack spill, * as some of the trees corresponding to code up to impCurOpcOffs might * still be sitting on the stack. * So we delay calling of SetLastILOffset() until impNoteLastILoffs(). * This should be called when an opcode finally/explicitly causes * impAppendTree(tree) to be called (as opposed to being called because of * a spill caused by the opcode) */ #ifdef DEBUG void Compiler::impNoteLastILoffs() { if (impLastILoffsStmt == nullptr) { // We should have added a statement for the current basic block // Is this assert correct ? assert(impLastStmt); impLastStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); } else { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } } #endif // DEBUG /***************************************************************************** * We don't create any GenTree (excluding spills) for a branch. * For debugging info, we need a placeholder so that we can note * the IL offset in gtStmt.gtStmtOffs. So append an empty statement. */ void Compiler::impNoteBranchOffs() { if (opts.compDbgCode) { impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } } /***************************************************************************** * Locate the next stmt boundary for which we need to record info. * We will have to spill the stack at such boundaries if it is not * already empty. * Returns the next stmt boundary (after the start of the block) */ unsigned Compiler::impInitBlockLineInfo() { /* Assume the block does not correspond with any IL offset. This prevents us from reporting extra offsets. Extra mappings can cause confusing stepping, especially if the extra mapping is a jump-target, and the debugger does not ignore extra mappings, but instead rewinds to the nearest known offset */ impCurStmtOffsSet(BAD_IL_OFFSET); IL_OFFSET blockOffs = compCurBB->bbCodeOffs; if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES)) { impCurStmtOffsSet(blockOffs); } /* Always report IL offset 0 or some tests get confused. 
Probably a good idea anyways */ if (blockOffs == 0) { impCurStmtOffsSet(blockOffs); } if (!info.compStmtOffsetsCount) { return ~0; } /* Find the lowest explicit stmt boundary within the block */ /* Start looking at an entry that is based on our instr offset */ unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize; if (index >= info.compStmtOffsetsCount) { index = info.compStmtOffsetsCount - 1; } /* If we've guessed too far, back up */ while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs) { index--; } /* If we guessed short, advance ahead */ while (info.compStmtOffsets[index] < blockOffs) { index++; if (index == info.compStmtOffsetsCount) { return info.compStmtOffsetsCount; } } assert(index < info.compStmtOffsetsCount); if (info.compStmtOffsets[index] == blockOffs) { /* There is an explicit boundary for the start of this basic block. So we will start with bbCodeOffs. Else we will wait until we get to the next explicit boundary */ impCurStmtOffsSet(blockOffs); index++; } return index; } /*****************************************************************************/ bool Compiler::impOpcodeIsCallOpcode(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: return true; default: return false; } } /*****************************************************************************/ static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: case CEE_JMP: case CEE_NEWOBJ: case CEE_NEWARR: return true; default: return false; } } /*****************************************************************************/ // One might think it is worth caching these values, but results indicate // that it isn't. // In addition, caching them causes SuperPMI to be unable to completely // encapsulate an individual method context. CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass() { CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr); return refAnyClass; } CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass() { CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE); assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr); return typeHandleClass; } CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle() { CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE); assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr); return argIteratorClass; } CORINFO_CLASS_HANDLE Compiler::impGetStringClass() { CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING); assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr); return stringClass; } CORINFO_CLASS_HANDLE Compiler::impGetObjectClass() { CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT); assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr); return objectClass; } /***************************************************************************** * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we * set its type to TYP_BYREF when we create it. 
We know if it can be * changed to TYP_I_IMPL only at the point where we use it */ /* static */ void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2) { if (tree1->IsLocalAddrExpr() != nullptr) { tree1->gtType = TYP_I_IMPL; } if (tree2 && (tree2->IsLocalAddrExpr() != nullptr)) { tree2->gtType = TYP_I_IMPL; } } /***************************************************************************** * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want * to make that an explicit cast in our trees, so any implicit casts that * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are * turned into explicit casts here. * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0) */ GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp) { var_types currType = genActualType(tree->gtType); var_types wantedType = genActualType(dstTyp); if (wantedType != currType) { // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp)) { if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->AsIntCon()->gtIconVal == 0))) { tree->gtType = TYP_I_IMPL; } } #ifdef TARGET_64BIT else if (varTypeIsI(wantedType) && (currType == TYP_INT)) { // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } else if ((wantedType == TYP_INT) && varTypeIsI(currType)) { // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT); } #endif // TARGET_64BIT } return tree; } /***************************************************************************** * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases, * but we want to make that an explicit cast in our trees, so any implicit casts * that exist in the IL are turned into explicit casts here. */ GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp) { if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType)) { tree = gtNewCastNode(dstTyp, tree, false, dstTyp); } return tree; } //------------------------------------------------------------------------ // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray // with a GT_COPYBLK node. // // Arguments: // sig - The InitializeArray signature. // // Return Value: // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or // nullptr otherwise. // // Notes: // The function recognizes the following IL pattern: // ldc <length> or a list of ldc <lower bound>/<length> // newarr or newobj // dup // ldtoken <field handle> // call InitializeArray // The lower bounds need not be constant except when the array rank is 1. // The function recognizes all kinds of arrays thus enabling a small runtime // such as CoreRT to skip providing an implementation for InitializeArray. GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 2); GenTree* fieldTokenNode = impStackTop(0).val; GenTree* arrayLocalNode = impStackTop(1).val; // // Verify that the field token is known and valid. Note that It's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
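    // (Illustrative note: the C# compiler emits this ldtoken/InitializeArray sequence for
    // constant array initializers such as "new int[] { 1, 2, 3 }", with the raw data kept
    // in a <PrivateImplementationDetails> RVA field referenced by the ldtoken.)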
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } // // We need to get the number of elements in the array and the size of each element. // We verify that the newarr statement is exactly what we expect it to be. // If it's not then we just return NULL and we don't optimize this call // // It is possible the we don't have any statements in the block yet. if (impLastStmt == nullptr) { return nullptr; } // // We start by looking at the last statement, making sure it's an assignment, and // that the target of the assignment is the array passed to InitializeArray. // GenTree* arrayAssignment = impLastStmt->GetRootNode(); if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->AsOp()->gtOp1->gtOper != GT_LCL_VAR) || (arrayLocalNode->gtOper != GT_LCL_VAR) || (arrayAssignment->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != arrayLocalNode->AsLclVarCommon()->GetLclNum())) { return nullptr; } // // Make sure that the object being assigned is a helper call. // GenTree* newArrayCall = arrayAssignment->AsOp()->gtOp2; if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->AsCall()->gtCallType != CT_HELPER)) { return nullptr; } // // Verify that it is one of the new array helpers. // bool isMDArray = false; if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8) #ifdef FEATURE_READYTORUN && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1) #endif ) { if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR)) { return nullptr; } isMDArray = true; } CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->AsCall()->compileTimeHelperArgumentHandle; // // Make sure we found a compile time handle to the array // if (!arrayClsHnd) { return nullptr; } unsigned rank = 0; S_UINT32 numElements; if (isMDArray) { rank = info.compCompHnd->getArrayRank(arrayClsHnd); if (rank == 0) { return nullptr; } GenTreeCall::Use* tokenArg = newArrayCall->AsCall()->gtCallArgs; assert(tokenArg != nullptr); GenTreeCall::Use* numArgsArg = tokenArg->GetNext(); assert(numArgsArg != nullptr); GenTreeCall::Use* argsArg = numArgsArg->GetNext(); assert(argsArg != nullptr); // // The number of arguments should be a constant between 1 and 64. The rank can't be 0 // so at least one length must be present and the rank can't exceed 32 so there can // be at most 64 arguments - 32 lengths and 32 lower bounds. 
// if ((!numArgsArg->GetNode()->IsCnsIntOrI()) || (numArgsArg->GetNode()->AsIntCon()->IconValue() < 1) || (numArgsArg->GetNode()->AsIntCon()->IconValue() > 64)) { return nullptr; } unsigned numArgs = static_cast<unsigned>(numArgsArg->GetNode()->AsIntCon()->IconValue()); bool lowerBoundsSpecified; if (numArgs == rank * 2) { lowerBoundsSpecified = true; } else if (numArgs == rank) { lowerBoundsSpecified = false; // // If the rank is 1 and a lower bound isn't specified then the runtime creates // a SDArray. Note that even if a lower bound is specified it can be 0 and then // we get a SDArray as well, see the for loop below. // if (rank == 1) { isMDArray = false; } } else { return nullptr; } // // The rank is known to be at least 1 so we can start with numElements being 1 // to avoid the need to special case the first dimension. // numElements = S_UINT32(1); struct Match { static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) && (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) && (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs); } static bool IsComma(GenTree* tree) { return (tree != nullptr) && (tree->OperGet() == GT_COMMA); } }; unsigned argIndex = 0; GenTree* comma; for (comma = argsArg->GetNode(); Match::IsComma(comma); comma = comma->gtGetOp2()) { if (lowerBoundsSpecified) { // // In general lower bounds can be ignored because they're not needed to // calculate the total number of elements. But for single dimensional arrays // we need to know if the lower bound is 0 because in this case the runtime // creates a SDArray and this affects the way the array data offset is calculated. // if (rank == 1) { GenTree* lowerBoundAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2(); if (lowerBoundNode->IsIntegralConst(0)) { isMDArray = false; } } comma = comma->gtGetOp2(); argIndex++; } GenTree* lengthNodeAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lengthNode = lengthNodeAssign->gtGetOp2(); if (!lengthNode->IsCnsIntOrI()) { return nullptr; } numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue()); argIndex++; } assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs)); if (argIndex != numArgs) { return nullptr; } } else { // // Make sure there are exactly two arguments: the array class and // the number of elements. 
// GenTree* arrayLengthNode; GenTreeCall::Use* args = newArrayCall->AsCall()->gtCallArgs; #ifdef FEATURE_READYTORUN if (newArrayCall->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)) { // Array length is 1st argument for readytorun helper arrayLengthNode = args->GetNode(); } else #endif { // Array length is 2nd argument for regular helper arrayLengthNode = args->GetNext()->GetNode(); } // // This optimization is only valid for a constant array size. // if (arrayLengthNode->gtOper != GT_CNS_INT) { return nullptr; } numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->gtIconVal); if (!info.compCompHnd->isSDArray(arrayClsHnd)) { return nullptr; } } CORINFO_CLASS_HANDLE elemClsHnd; var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd)); // // Note that genTypeSize will return zero for non primitive types, which is exactly // what we want (size will then be 0, and we will catch this in the conditional below). // Note that we don't expect this to fail for valid binaries, so we assert in the // non-verification case (the verification case should not assert but rather correctly // handle bad binaries). This assert is not guarding any specific invariant, but rather // saying that we don't expect this to happen, and if it is hit, we need to investigate // why. // S_UINT32 elemSize(genTypeSize(elementType)); S_UINT32 size = elemSize * S_UINT32(numElements); if (size.IsOverflow()) { return nullptr; } if ((size.Value() == 0) || (varTypeIsGC(elementType))) { return nullptr; } void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value()); if (!initData) { return nullptr; } // // At this point we are ready to commit to implementing the InitializeArray // intrinsic using a struct assignment. Pop the arguments from the stack and // return the struct assignment node. // impPopStack(); impPopStack(); const unsigned blkSize = size.Value(); unsigned dataOffset; if (isMDArray) { dataOffset = eeGetMDArrayDataOffset(rank); } else { dataOffset = eeGetArrayDataOffset(); } GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL)); GenTree* dst = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, dstAddr, typGetBlkLayout(blkSize)); GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_CONST_PTR, true); #ifdef DEBUG src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_IntializeArrayIntrinsics; #endif return gtNewBlkOpNode(dst, // dst src, // src false, // volatile true); // copyBlock } GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 1); assert(sig->sigInst.methInstCount == 1); GenTree* fieldTokenNode = impStackTop(0).val; // // Verify that the field token is known and valid. Note that it's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
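    // (Illustrative note: this pattern typically comes from the C# compiler turning a
    // constant-initialized "ReadOnlySpan<byte>" into a RuntimeHelpers.CreateSpan call over
    // an RVA static data field, avoiding an array allocation entirely.)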
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } CORINFO_CLASS_HANDLE fieldOwnerHnd = info.compCompHnd->getFieldClass(fieldToken); CORINFO_CLASS_HANDLE fieldClsHnd; var_types fieldElementType = JITtype2varType(info.compCompHnd->getFieldType(fieldToken, &fieldClsHnd, fieldOwnerHnd)); unsigned totalFieldSize; // Most static initialization data fields are of some structure, but it is possible for them to be of various // primitive types as well if (fieldElementType == var_types::TYP_STRUCT) { totalFieldSize = info.compCompHnd->getClassSize(fieldClsHnd); } else { totalFieldSize = genTypeSize(fieldElementType); } // Limit to primitive or enum type - see ArrayNative::GetSpanDataFrom() CORINFO_CLASS_HANDLE targetElemHnd = sig->sigInst.methInst[0]; if (info.compCompHnd->getTypeForPrimitiveValueClass(targetElemHnd) == CORINFO_TYPE_UNDEF) { return nullptr; } const unsigned targetElemSize = info.compCompHnd->getClassSize(targetElemHnd); assert(targetElemSize != 0); const unsigned count = totalFieldSize / targetElemSize; if (count == 0) { return nullptr; } void* data = info.compCompHnd->getArrayInitializationData(fieldToken, totalFieldSize); if (!data) { return nullptr; } // // Ready to commit to the work // impPopStack(); // Turn count and pointer value into constants. GenTree* lengthValue = gtNewIconNode(count, TYP_INT); GenTree* pointerValue = gtNewIconHandleNode((size_t)data, GTF_ICON_CONST_PTR); // Construct ReadOnlySpan<T> to return. CORINFO_CLASS_HANDLE spanHnd = sig->retTypeClass; unsigned spanTempNum = lvaGrabTemp(true DEBUGARG("ReadOnlySpan<T> for CreateSpan<T>")); lvaSetStruct(spanTempNum, spanHnd, false); CORINFO_FIELD_HANDLE pointerFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 0); CORINFO_FIELD_HANDLE lengthFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 1); GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, 0); pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd)); GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue); GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, TARGET_POINTER_SIZE); lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd)); GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue); // Now append a few statements the initialize the span impAppendTree(lengthFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); impAppendTree(pointerFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // And finally create a tree that points at the span. 
return impCreateLocalNode(spanTempNum DEBUGARG(0)); } //------------------------------------------------------------------------ // impIntrinsic: possibly expand intrinsic call into alternate IR sequence // // Arguments: // newobjThis - for constructor calls, the tree for the newly allocated object // clsHnd - handle for the intrinsic method's class // method - handle for the intrinsic method // sig - signature of the intrinsic method // methodFlags - CORINFO_FLG_XXX flags of the intrinsic method // memberRef - the token for the intrinsic method // readonlyCall - true if call has a readonly prefix // tailCall - true if call is in tail position // pConstrainedResolvedToken -- resolved token for constrained call, or nullptr // if call is not constrained // constraintCallThisTransform -- this transform to apply for a constrained call // pIntrinsicName [OUT] -- intrinsic name (see enumeration in namedintrinsiclist.h) // for "traditional" jit intrinsics // isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call // that is amenable to special downstream optimization opportunities // // Returns: // IR tree to use in place of the call, or nullptr if the jit should treat // the intrinsic call like a normal call. // // pIntrinsicName set to non-illegal value if the call is recognized as a // traditional jit intrinsic, even if the intrinsic is not expaned. // // isSpecial set true if the expansion is subject to special // optimizations later in the jit processing // // Notes: // On success the IR tree may be a call to a different method or an inline // sequence. If it is a call, then the intrinsic processing here is responsible // for handling all the special cases, as upon return to impImportCall // expanded intrinsics bypass most of the normal call processing. // // Intrinsics are generally not recognized in minopts and debug codegen. // // However, certain traditional intrinsics are identifed as "must expand" // if there is no fallback implmentation to invoke; these must be handled // in all codegen modes. // // New style intrinsics (where the fallback implementation is in IL) are // identified as "must expand" if they are invoked from within their // own method bodies. // GenTree* Compiler::impIntrinsic(GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef, bool readonlyCall, bool tailCall, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM constraintCallThisTransform, NamedIntrinsic* pIntrinsicName, bool* isSpecialIntrinsic) { assert((methodFlags & CORINFO_FLG_INTRINSIC) != 0); bool mustExpand = false; bool isSpecial = false; NamedIntrinsic ni = NI_Illegal; if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0) { // The recursive non-virtual calls to Jit intrinsics are must-expand by convention. mustExpand = mustExpand || (gtIsRecursiveCall(method) && !(methodFlags & CORINFO_FLG_VIRTUAL)); ni = lookupNamedIntrinsic(method); // We specially support the following on all platforms to allow for dead // code optimization and to more generally support recursive intrinsics. 
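        // (For example, a guard such as "if (Avx2.IsSupported)" folds to a constant here,
        // which lets the jit remove the unsupported path as dead code.)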
if (ni == NI_IsSupported_True) { assert(sig->numArgs == 0); return gtNewIconNode(true); } if (ni == NI_IsSupported_False) { assert(sig->numArgs == 0); return gtNewIconNode(false); } if (ni == NI_Throw_PlatformNotSupportedException) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } #ifdef FEATURE_HW_INTRINSICS if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END)) { GenTree* hwintrinsic = impHWIntrinsic(ni, clsHnd, method, sig, mustExpand); if (mustExpand && (hwintrinsic == nullptr)) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand); } return hwintrinsic; } if ((ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END)) { // These intrinsics aren't defined recursively and so they will never be mustExpand // Instead, they provide software fallbacks that will be executed instead. assert(!mustExpand); return impSimdAsHWIntrinsic(ni, clsHnd, method, sig, newobjThis); } #endif // FEATURE_HW_INTRINSICS } *pIntrinsicName = ni; if (ni == NI_System_StubHelpers_GetStubContext) { // must be done regardless of DbgCode and MinOpts return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL); } if (ni == NI_System_StubHelpers_NextCallReturnAddress) { // For now we just avoid inlining anything into these methods since // this intrinsic is only rarely used. We could do this better if we // wanted to by trying to match which call is the one we need to get // the return address of. info.compHasNextCallRetAddr = true; return new (this, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL); } switch (ni) { // CreateSpan must be expanded for NativeAOT case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: mustExpand |= IsTargetAbi(CORINFO_CORERT_ABI); break; case NI_System_ByReference_ctor: case NI_System_ByReference_get_Value: case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: mustExpand = true; break; default: break; } GenTree* retNode = nullptr; // Under debug and minopts, only expand what is required. // NextCallReturnAddress intrinsic returns the return address of the next call. // If that call is an intrinsic and is expanded, codegen for NextCallReturnAddress will fail. // To avoid that we conservatively expand only required intrinsics in methods that call // the NextCallReturnAddress intrinsic. 
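    // For example (illustrative): if this method uses NextCallReturnAddress and also
    // calls Math.Sqrt, expanding Sqrt to a non-call node here could remove the very
    // call whose return address is meant to be observed.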
if (!mustExpand && (opts.OptimizationDisabled() || info.compHasNextCallRetAddr)) { *pIntrinsicName = NI_Illegal; return retNode; } CorInfoType callJitType = sig->retType; var_types callType = JITtype2varType(callJitType); /* First do the intrinsics which are always smaller than a call */ if (ni != NI_Illegal) { assert(retNode == nullptr); switch (ni) { case NI_Array_Address: case NI_Array_Get: case NI_Array_Set: retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, ni); break; case NI_System_String_Equals: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_MemoryExtensions_Equals: case NI_System_MemoryExtensions_SequenceEqual: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_String_StartsWith: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_StartsWith: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_AsSpan: case NI_System_String_op_Implicit: { assert(sig->numArgs == 1); isSpecial = impStackTop().val->OperIs(GT_CNS_STR); break; } case NI_System_String_get_Chars: { GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; retNode = gtNewIndexRef(TYP_USHORT, op1, op2); retNode->gtFlags |= GTF_INX_STRING_LAYOUT; break; } case NI_System_String_get_Length: { GenTree* op1 = impPopStack().val; if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. "Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { retNode = iconNode; break; } } GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen, compCurBB); op1 = arrLen; // Getting the length of a null string should throw op1->gtFlags |= GTF_EXCEPT; retNode = op1; break; } // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field // in a value type. The canonical example of this is Span<T>. In effect this is just a // substitution. The parameter byref will be assigned into the newly allocated object. case NI_System_ByReference_ctor: { // Remove call to constructor and directly assign the byref passed // to the call to the first slot of the ByReference struct. GenTree* op1 = impPopStack().val; GenTree* thisptr = newobjThis; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0); GenTree* assign = gtNewAssignNode(field, op1); GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1()); assert(byReferenceStruct != nullptr); impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd)); retNode = assign; break; } // Implement ptr value getter for ByReference struct. case NI_System_ByReference_get_Value: { GenTree* op1 = impPopStack().val; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0); retNode = field; break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: { retNode = impCreateSpanIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: { retNode = impInitializeArrayIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant: { GenTree* op1 = impPopStack().val; if (op1->OperIsConst()) { // op1 is a known constant, replace with 'true'. 
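                    // e.g. (illustrative) a call whose argument imported as a constant
                    // node (an integer literal, say) folds to true right here.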
retNode = gtNewIconNode(1); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to true early\n"); // We can also consider FTN_ADDR and typeof(T) here } else { // op1 is not a known constant, we'll do the expansion in morph retNode = new (this, GT_INTRINSIC) GenTreeIntrinsic(TYP_INT, op1, ni, method); JITDUMP("\nConverting RuntimeHelpers.IsKnownConstant to:\n"); DISPTREE(retNode); } break; } case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: { assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it. CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = memberRef; resolvedToken.tokenType = CORINFO_TOKENKIND_Method; CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo); GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef), embedInfo.compileTimeHandle); if (rawHandle == nullptr) { return nullptr; } noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL)); unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle")); impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE); GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL); GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar); var_types resultType = JITtype2varType(sig->retType); retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr); break; } case NI_System_Span_get_Item: case NI_System_ReadOnlySpan_get_Item: { // Have index, stack pointer-to Span<T> s on the stack. Expand to: // // For Span<T> // Comma // BoundsCheck(index, s->_length) // s->_pointer + index * sizeof(T) // // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref // // Signature should show one class type parameter, which // we need to examine. assert(sig->sigInst.classInstCount == 1); assert(sig->numArgs == 1); CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0]; const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd); assert(elemSize > 0); const bool isReadOnly = (ni == NI_System_ReadOnlySpan_get_Item); JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "", info.compCompHnd->getClassName(spanElemHnd), elemSize); GenTree* index = impPopStack().val; GenTree* ptrToSpan = impPopStack().val; GenTree* indexClone = nullptr; GenTree* ptrToSpanClone = nullptr; assert(genActualType(index) == TYP_INT); assert(ptrToSpan->TypeGet() == TYP_BYREF); #if defined(DEBUG) if (verbose) { printf("with ptr-to-span\n"); gtDispTree(ptrToSpan); printf("and index\n"); gtDispTree(index); } #endif // defined(DEBUG) // We need to use both index and ptr-to-span twice, so clone or spill. 
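                // The index feeds both the bounds check and the address computation, and
                // the span is read for both _length and _pointer; e.g. (illustrative,
                // T = int) the final shape is:
                //
                //     COMMA(BoundsCheck(i, s->_length), s->_pointer + i * 4)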
index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item index")); ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item ptrToSpan")); // Bounds check CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1); const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd); GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset); GenTree* boundsCheck = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, length, SCK_RNGCHK_FAIL); // Element access index = indexClone; #ifdef TARGET_64BIT if (index->OperGet() == GT_CNS_INT) { index->gtType = TYP_I_IMPL; } else { index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL); } #endif if (elemSize != 1) { GenTree* sizeofNode = gtNewIconNode(static_cast<ssize_t>(elemSize), TYP_I_IMPL); index = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, sizeofNode); } CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd); GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset); GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, index); // Prepare result var_types resultType = JITtype2varType(sig->retType); assert(resultType == result->TypeGet()); retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result); break; } case NI_System_RuntimeTypeHandle_GetValueInternal: { GenTree* op1 = impStackTop(0).val; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall())) { // Old tree // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle // // New tree // TreeToGetNativeTypeHandle // Remove call to helper and return the native TypeHandle pointer that was the parameter // to that helper. op1 = impPopStack().val; // Get native TypeHandle argument to old helper GenTreeCall::Use* arg = op1->AsCall()->gtCallArgs; assert(arg->GetNext() == nullptr); op1 = arg->GetNode(); retNode = op1; } // Call the regular function. break; } case NI_System_Type_GetTypeFromHandle: { GenTree* op1 = impStackTop(0).val; CorInfoHelpFunc typeHandleHelper; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper)) { op1 = impPopStack().val; // Replace helper with a more specialized helper that returns RuntimeType if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE) { typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE; } else { assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL); typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL; } assert(op1->AsCall()->gtCallArgs->GetNext() == nullptr); op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->AsCall()->gtCallArgs); op1->gtType = TYP_REF; retNode = op1; } break; } case NI_System_Type_op_Equality: case NI_System_Type_op_Inequality: { JITDUMP("Importing Type.op_*Equality intrinsic\n"); GenTree* op1 = impStackTop(1).val; GenTree* op2 = impStackTop(0).val; GenTree* optTree = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2); if (optTree != nullptr) { // Success, clean up the evaluation stack. impPopStack(); impPopStack(); // See if we can optimize even further, to a handle compare. optTree = gtFoldTypeCompare(optTree); // See if we can now fold a handle compare to a constant. 
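                    // e.g. (illustrative) typeof(int) == typeof(string) can fold all the
                    // way down to a constant at this point.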
optTree = gtFoldExpr(optTree); retNode = optTree; } else { // Retry optimizing these later isSpecial = true; } break; } case NI_System_Enum_HasFlag: { GenTree* thisOp = impStackTop(1).val; GenTree* flagOp = impStackTop(0).val; GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp); if (optTree != nullptr) { // Optimization successful. Pop the stack for real. impPopStack(); impPopStack(); retNode = optTree; } else { // Retry optimizing this during morph. isSpecial = true; } break; } case NI_System_Type_IsAssignableFrom: { GenTree* typeTo = impStackTop(1).val; GenTree* typeFrom = impStackTop(0).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_IsAssignableTo: { GenTree* typeTo = impStackTop(0).val; GenTree* typeFrom = impStackTop(1).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_get_IsValueType: { // Optimize // // call Type.GetTypeFromHandle (which is replaced with CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) // call Type.IsValueType // // to `true` or `false` // e.g. `typeof(int).IsValueType` => `true` if (impStackTop().val->IsCall()) { GenTreeCall* call = impStackTop().val->AsCall(); if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE)) { CORINFO_CLASS_HANDLE hClass = gtGetHelperArgClassHandle(call->gtCallArgs->GetNode()); if (hClass != NO_CLASS_HANDLE) { retNode = gtNewIconNode((eeIsValueClass(hClass) && // pointers are not value types (e.g. typeof(int*).IsValueType is false) info.compCompHnd->asCorInfoType(hClass) != CORINFO_TYPE_PTR) ? 1 : 0); impPopStack(); // drop CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE call } } } break; } case NI_System_Threading_Thread_get_ManagedThreadId: { if (impStackTop().val->OperIs(GT_RET_EXPR)) { GenTreeCall* call = impStackTop().val->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { if (lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Threading_Thread_get_CurrentThread) { // drop get_CurrentThread() call impPopStack(); call->ReplaceWith(gtNewNothingNode(), this); retNode = gtNewHelperCallNode(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, TYP_INT); } } } break; } #ifdef TARGET_ARM64 // Intrinsify Interlocked.Or and Interlocked.And only for arm64-v8.1 (and newer) // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). case NI_System_Threading_Interlocked_Or: case NI_System_Threading_Interlocked_And: { if (compOpportunisticallyDependsOn(InstructionSet_Atomics)) { assert(sig->numArgs == 2); GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; genTreeOps op = (ni == NI_System_Threading_Interlocked_Or) ? 
GT_XORR : GT_XAND; retNode = gtNewOperNode(op, genActualType(callType), op1, op2); retNode->gtFlags |= GTF_GLOB_REF | GTF_ASG; } break; } #endif // TARGET_ARM64 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic case NI_System_Threading_Interlocked_CompareExchange: { var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } assert(callType != TYP_STRUCT); assert(sig->numArgs == 3); GenTree* op3 = impPopStack().val; // comparand GenTree* op2 = impPopStack().val; // value GenTree* op1 = impPopStack().val; // location GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3); node->AsCmpXchg()->gtOpLocation->gtFlags |= GTF_DONT_CSE; retNode = node; break; } case NI_System_Threading_Interlocked_Exchange: case NI_System_Threading_Interlocked_ExchangeAdd: { assert(callType != TYP_STRUCT); assert(sig->numArgs == 2); var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; // This creates: // val // XAdd // addr // field (for example) // // In the case where the first argument is the address of a local, we might // want to make this *not* make the var address-taken -- but atomic instructions // on a local are probably pretty useless anyway, so we probably don't care. op1 = gtNewOperNode(ni == NI_System_Threading_Interlocked_ExchangeAdd ? GT_XADD : GT_XCHG, genActualType(callType), op1, op2); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; retNode = op1; break; } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) case NI_System_Threading_Interlocked_MemoryBarrier: case NI_System_Threading_Interlocked_ReadMemoryBarrier: { assert(sig->numArgs == 0); GenTree* op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; // On XARCH `NI_System_Threading_Interlocked_ReadMemoryBarrier` fences need not be emitted. // However, we still need to capture the effect on reordering. 
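                // That is (illustrative): the barrier node below still blocks reordering
                // in later phases, but a load-only barrier may not need an actual fence
                // instruction at codegen time on xarch.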
if (ni == NI_System_Threading_Interlocked_ReadMemoryBarrier) { op1->gtFlags |= GTF_MEMORYBARRIER_LOAD; } retNode = op1; break; } #ifdef FEATURE_HW_INTRINSICS case NI_System_Math_FusedMultiplyAdd: { #ifdef TARGET_XARCH if (compExactlyDependsOn(InstructionSet_FMA) && supportSIMDTypes()) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return FMA.MultiplyAddScalar( // Vector128.CreateScalarUnsafe(x), // Vector128.CreateScalarUnsafe(y), // Vector128.CreateScalarUnsafe(z) // ).ToScalar(); GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* res = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callJitType, 16); retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callJitType, 16); break; } #elif defined(TARGET_ARM64) if (compExactlyDependsOn(InstructionSet_AdvSimd)) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return AdvSimd.FusedMultiplyAddScalar( // Vector64.Create{ScalarUnsafe}(z), // Vector64.Create{ScalarUnsafe}(y), // Vector64.Create{ScalarUnsafe}(x) // ).ToScalar(); NamedIntrinsic createVector64 = (callType == TYP_DOUBLE) ? NI_Vector64_Create : NI_Vector64_CreateScalarUnsafe; constexpr unsigned int simdSize = 8; GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); // Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3 // while Math{F}.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 * op2 + op3 retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar, callJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callJitType, simdSize); break; } #endif // TODO-CQ-XArch: Ideally we would create a GT_INTRINSIC node for fma, however, that currently // requires more extensive changes to valuenum to support methods with 3 operands // We want to generate a GT_INTRINSIC node in the case the call can't be treated as // a target intrinsic so that we can still benefit from CSE and constant folding. 
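                // If neither path above applied, retNode stays null and (illustrative
                // summary) the call is imported as an ordinary call to Math{F}.FusedMultiplyAdd.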
break; } #endif // FEATURE_HW_INTRINSICS case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: #ifdef TARGET_ARM64 // ARM64 has fmax/fmin which are IEEE754:2019 minimum/maximum compatible // TODO-XARCH-CQ: Enable this for XARCH when one of the arguments is a constant // so we can then emit maxss/minss and avoid NaN/-0.0 handling case NI_System_Math_Max: case NI_System_Math_Min: #endif case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: case NI_System_Math_Truncate: { retNode = impMathIntrinsic(method, sig, callType, ni, tailCall); break; } case NI_System_Array_Clone: case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: case NI_System_Object_MemberwiseClone: case NI_System_Threading_Thread_get_CurrentThread: { // Flag for later handling. isSpecial = true; break; } case NI_System_Object_GetType: { JITDUMP("\n impIntrinsic: call to Object.GetType\n"); GenTree* op1 = impStackTop(0).val; // If we're calling GetType on a boxed value, just get the type directly. if (op1->IsBoxedValue()) { JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n"); // Try and clean up the box. Obtain the handle we // were going to pass to the newobj. GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE); if (boxTypeHandle != nullptr) { // Note we don't need to play the TYP_STRUCT games here like // do for LDTOKEN since the return value of this operator is Type, // not RuntimeTypeHandle. impPopStack(); GenTreeCall::Use* helperArgs = gtNewCallArgs(boxTypeHandle); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } // If we have a constrained callvirt with a "box this" transform // we know we have a value class and hence an exact type. // // If so, instead of boxing and then extracting the type, just // construct the type directly. if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) && (constraintCallThisTransform == CORINFO_BOX_THIS)) { // Ensure this is one of the is simple box cases (in particular, rule out nullables). 
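                    // e.g. (illustrative): a constrained GetType() on an int becomes the
                    // RuntimeType for System.Int32 built via CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
                    // Nullable<T> uses a different box helper and is left to the normal path.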
const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass); const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX); if (isSafeToOptimize) { JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n"); impPopStack(); GenTree* typeHandleOp = impTokenToHandle(pConstrainedResolvedToken, nullptr, true /* mustRestoreHandle */); if (typeHandleOp == nullptr) { assert(compDonotInline()); return nullptr; } GenTreeCall::Use* helperArgs = gtNewCallArgs(typeHandleOp); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } #ifdef DEBUG if (retNode != nullptr) { JITDUMP("Optimized result for call to GetType is\n"); if (verbose) { gtDispTree(retNode); } } #endif // Else expand as an intrinsic, unless the call is constrained, // in which case we defer expansion to allow impImportCall do the // special constraint processing. if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr)) { JITDUMP("Expanding as special intrinsic\n"); impPopStack(); op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, ni, method); // Set the CALL flag to indicate that the operator is implemented by a call. // Set also the EXCEPTION flag because the native implementation of // NI_System_Object_GetType intrinsic can throw NullReferenceException. op1->gtFlags |= (GTF_CALL | GTF_EXCEPT); retNode = op1; // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } if (retNode == nullptr) { JITDUMP("Leaving as normal call\n"); // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } break; } case NI_System_Array_GetLength: case NI_System_Array_GetLowerBound: case NI_System_Array_GetUpperBound: { // System.Array.GetLength(Int32) method: // public int GetLength(int dimension) // System.Array.GetLowerBound(Int32) method: // public int GetLowerBound(int dimension) // System.Array.GetUpperBound(Int32) method: // public int GetUpperBound(int dimension) // // Only implement these as intrinsics for multi-dimensional arrays. // Only handle constant dimension arguments. GenTree* gtDim = impStackTop().val; GenTree* gtArr = impStackTop(1).val; if (gtDim->IsIntegralConst()) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE arrCls = gtGetClassHandle(gtArr, &isExact, &isNonNull); if (arrCls != NO_CLASS_HANDLE) { unsigned rank = info.compCompHnd->getArrayRank(arrCls); if ((rank > 1) && !info.compCompHnd->isSDArray(arrCls)) { // `rank` is guaranteed to be <=32 (see MAX_RANK in vm\array.h). Any constant argument // is `int` sized. INT64 dimValue = gtDim->AsIntConCommon()->IntegralValue(); assert((unsigned int)dimValue == dimValue); unsigned dim = (unsigned int)dimValue; if (dim < rank) { // This is now known to be a multi-dimension array with a constant dimension // that is in range; we can expand it as an intrinsic. impPopStack().val; // Pop the dim and array object; we already have a pointer to them. impPopStack().val; // Make sure there are no global effects in the array (such as it being a function // call), so we can mark the generated indirection with GTF_IND_INVARIANT. In the // GetUpperBound case we need the cloned object, since we refer to the array // object twice. In the other cases, we don't need to clone. 
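                                // e.g. (illustrative) for int[,] 'a', a.GetUpperBound(d) expands to
                                // a.GetLowerBound(d) + a.GetLength(d) - 1, which reads the array
                                // object twice and therefore needs the clone made below.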
GenTree* gtArrClone = nullptr; if (((gtArr->gtFlags & GTF_GLOB_EFFECT) != 0) || (ni == NI_System_Array_GetUpperBound)) { gtArr = impCloneExpr(gtArr, &gtArrClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("MD intrinsics array")); } switch (ni) { case NI_System_Array_GetLength: { // Generate *(array + offset-to-length-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLengthOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetLowerBound: { // Generate *(array + offset-to-bounds-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetUpperBound: { assert(gtArrClone != nullptr); // Generate: // *(array + offset-to-length-array + sizeof(int) * dim) + // *(array + offset-to-bounds-array + sizeof(int) * dim) - 1 unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); GenTree* gtLowerBound = gtNewIndir(TYP_INT, gtAddr); gtLowerBound->gtFlags |= GTF_IND_INVARIANT; offs = eeGetMDArrayLengthOffset(rank, dim); gtOffs = gtNewIconNode(offs, TYP_I_IMPL); gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArrClone, gtOffs); GenTree* gtLength = gtNewIndir(TYP_INT, gtAddr); gtLength->gtFlags |= GTF_IND_INVARIANT; GenTree* gtSum = gtNewOperNode(GT_ADD, TYP_INT, gtLowerBound, gtLength); GenTree* gtOne = gtNewIconNode(1, TYP_INT); retNode = gtNewOperNode(GT_SUB, TYP_INT, gtSum, gtOne); break; } default: unreached(); } } } } } break; } case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness: { assert(sig->numArgs == 1); // We expect the return type of the ReverseEndianness routine to match the type of the // one and only argument to the method. We use a special instruction for 16-bit // BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally, // we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a // 64-bit byte swap on a 32-bit arch, we'll fall to the default case in the switch block below. switch (sig->retType) { case CorInfoType::CORINFO_TYPE_SHORT: case CorInfoType::CORINFO_TYPE_USHORT: retNode = gtNewCastNode(TYP_INT, gtNewOperNode(GT_BSWAP16, TYP_INT, impPopStack().val), false, callType); break; case CorInfoType::CORINFO_TYPE_INT: case CorInfoType::CORINFO_TYPE_UINT: #ifdef TARGET_64BIT case CorInfoType::CORINFO_TYPE_LONG: case CorInfoType::CORINFO_TYPE_ULONG: #endif // TARGET_64BIT retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val); break; default: // This default case gets hit on 32-bit archs when a call to a 64-bit overload // of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard // method call, where the implementation decomposes the operation into two 32-bit // bswap routines. If the input to the 64-bit function is a constant, then we rely // on inlining + constant folding of 32-bit bswaps to effectively constant fold // the 64-bit call site. 
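                    // e.g. (illustrative) on a 32-bit target, ReverseEndianness of a ulong stays
                    // a call; the managed implementation swaps the two 32-bit halves, and after
                    // inlining those 32-bit swaps are intrinsified (and constant folded for
                    // literal inputs).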
break; } break; } // Fold PopCount for constant input case NI_System_Numerics_BitOperations_PopCount: { assert(sig->numArgs == 1); if (impStackTop().val->IsIntegralConst()) { typeInfo argType = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack(); INT64 cns = impPopStack().val->AsIntConCommon()->IntegralValue(); if (argType.IsType(TI_LONG)) { retNode = gtNewIconNode(genCountBits(cns), callType); } else { assert(argType.IsType(TI_INT)); retNode = gtNewIconNode(genCountBits(static_cast<unsigned>(cns)), callType); } } break; } case NI_System_GC_KeepAlive: { retNode = impKeepAliveIntrinsic(impPopStack().val); break; } default: break; } } if (mustExpand && (retNode == nullptr)) { assert(!"Unhandled must expand intrinsic, throwing PlatformNotSupportedException"); return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } // Optionally report if this intrinsic is special // (that is, potentially re-optimizable during morph). if (isSpecialIntrinsic != nullptr) { *isSpecialIntrinsic = isSpecial; } return retNode; } GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) { // Optimize patterns like: // // typeof(TTo).IsAssignableFrom(typeof(TTFrom)) // valueTypeVar.GetType().IsAssignableFrom(typeof(TTFrom)) // typeof(TTFrom).IsAssignableTo(typeof(TTo)) // typeof(TTFrom).IsAssignableTo(valueTypeVar.GetType()) // // to true/false if (typeTo->IsCall() && typeFrom->IsCall()) { // make sure both arguments are `typeof()` CORINFO_METHOD_HANDLE hTypeof = eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE); if ((typeTo->AsCall()->gtCallMethHnd == hTypeof) && (typeFrom->AsCall()->gtCallMethHnd == hTypeof)) { CORINFO_CLASS_HANDLE hClassTo = gtGetHelperArgClassHandle(typeTo->AsCall()->gtCallArgs->GetNode()); CORINFO_CLASS_HANDLE hClassFrom = gtGetHelperArgClassHandle(typeFrom->AsCall()->gtCallArgs->GetNode()); if (hClassTo == NO_CLASS_HANDLE || hClassFrom == NO_CLASS_HANDLE) { return nullptr; } TypeCompareState castResult = info.compCompHnd->compareTypesForCast(hClassFrom, hClassTo); if (castResult == TypeCompareState::May) { // requires runtime check // e.g. __Canon, COMObjects, Nullable return nullptr; } GenTreeIntCon* retNode = gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0); impPopStack(); // drop both CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE calls impPopStack(); return retNode; } } return nullptr; } GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall) { GenTree* op1; GenTree* op2; assert(callType != TYP_STRUCT); assert(IsMathIntrinsic(intrinsicName)); op1 = nullptr; #if !defined(TARGET_X86) // Intrinsics that are not implemented directly by target instructions will // be re-materialized as users calls in rationalizer. For prefixed tail calls, // don't do this optimization, because // a) For back compatibility reasons on desktop .NET Framework 4.6 / 4.6.1 // b) It will be non-trivial task or too late to re-materialize a surviving // tail prefixed GT_INTRINSIC as tail call in rationalizer. if (!IsIntrinsicImplementedByUserCall(intrinsicName) || !tailCall) #else // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad // code generation for certain EH constructs. 
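    // For example (illustrative): Math.Sqrt maps to a target instruction and can be imported
    // as GT_INTRINSIC, whereas an intrinsic implemented by a user call (re-materialized as a
    // call in rationalize) is skipped here on x86.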
if (!IsIntrinsicImplementedByUserCall(intrinsicName)) #endif { CORINFO_CLASS_HANDLE tmpClass; CORINFO_ARG_LIST_HANDLE arg; var_types op1Type; var_types op2Type; switch (sig->numArgs) { case 1: op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicName, method); break; case 2: op2 = impPopStack().val; op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } arg = info.compCompHnd->getArgNext(arg); op2Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op2->TypeGet() != genActualType(op2Type)) { assert(varTypeIsFloating(op2)); op2 = gtNewCastNode(callType, op2, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicName, method); break; default: NO_WAY("Unsupported number of args for Math Intrinsic"); } if (IsIntrinsicImplementedByUserCall(intrinsicName)) { op1->gtFlags |= GTF_CALL; } } return op1; } //------------------------------------------------------------------------ // lookupNamedIntrinsic: map method to jit named intrinsic value // // Arguments: // method -- method handle for method // // Return Value: // Id for the named intrinsic, or Illegal if none. // // Notes: // method should have CORINFO_FLG_INTRINSIC set in its attributes, // otherwise it is not a named jit intrinsic. 
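//
//    For example (illustrative): the handle for System.Math.Sqrt maps to
//    NI_System_Math_Sqrt, while methods that are not recognized yield NI_Illegal.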
// NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) { const char* className = nullptr; const char* namespaceName = nullptr; const char* enclosingClassName = nullptr; const char* methodName = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName, &enclosingClassName); JITDUMP("Named Intrinsic "); if (namespaceName != nullptr) { JITDUMP("%s.", namespaceName); } if (enclosingClassName != nullptr) { JITDUMP("%s.", enclosingClassName); } if (className != nullptr) { JITDUMP("%s.", className); } if (methodName != nullptr) { JITDUMP("%s", methodName); } if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr)) { // Check if we are dealing with an MD array's known runtime method CorInfoArrayIntrinsic arrayFuncIndex = info.compCompHnd->getArrayIntrinsicID(method); switch (arrayFuncIndex) { case CorInfoArrayIntrinsic::GET: JITDUMP("ARRAY_FUNC_GET: Recognized\n"); return NI_Array_Get; case CorInfoArrayIntrinsic::SET: JITDUMP("ARRAY_FUNC_SET: Recognized\n"); return NI_Array_Set; case CorInfoArrayIntrinsic::ADDRESS: JITDUMP("ARRAY_FUNC_ADDRESS: Recognized\n"); return NI_Array_Address; default: break; } JITDUMP(": Not recognized, not enough metadata\n"); return NI_Illegal; } JITDUMP(": "); NamedIntrinsic result = NI_Illegal; if (strcmp(namespaceName, "System") == 0) { if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0)) { result = NI_System_Enum_HasFlag; } else if (strcmp(className, "Activator") == 0) { if (strcmp(methodName, "AllocatorOf") == 0) { result = NI_System_Activator_AllocatorOf; } else if (strcmp(methodName, "DefaultConstructorOf") == 0) { result = NI_System_Activator_DefaultConstructorOf; } } else if (strcmp(className, "ByReference`1") == 0) { if (strcmp(methodName, ".ctor") == 0) { result = NI_System_ByReference_ctor; } else if (strcmp(methodName, "get_Value") == 0) { result = NI_System_ByReference_get_Value; } } else if (strcmp(className, "Math") == 0 || strcmp(className, "MathF") == 0) { if (strcmp(methodName, "Abs") == 0) { result = NI_System_Math_Abs; } else if (strcmp(methodName, "Acos") == 0) { result = NI_System_Math_Acos; } else if (strcmp(methodName, "Acosh") == 0) { result = NI_System_Math_Acosh; } else if (strcmp(methodName, "Asin") == 0) { result = NI_System_Math_Asin; } else if (strcmp(methodName, "Asinh") == 0) { result = NI_System_Math_Asinh; } else if (strcmp(methodName, "Atan") == 0) { result = NI_System_Math_Atan; } else if (strcmp(methodName, "Atanh") == 0) { result = NI_System_Math_Atanh; } else if (strcmp(methodName, "Atan2") == 0) { result = NI_System_Math_Atan2; } else if (strcmp(methodName, "Cbrt") == 0) { result = NI_System_Math_Cbrt; } else if (strcmp(methodName, "Ceiling") == 0) { result = NI_System_Math_Ceiling; } else if (strcmp(methodName, "Cos") == 0) { result = NI_System_Math_Cos; } else if (strcmp(methodName, "Cosh") == 0) { result = NI_System_Math_Cosh; } else if (strcmp(methodName, "Exp") == 0) { result = NI_System_Math_Exp; } else if (strcmp(methodName, "Floor") == 0) { result = NI_System_Math_Floor; } else if (strcmp(methodName, "FMod") == 0) { result = NI_System_Math_FMod; } else if (strcmp(methodName, "FusedMultiplyAdd") == 0) { result = NI_System_Math_FusedMultiplyAdd; } else if (strcmp(methodName, "ILogB") == 0) { result = NI_System_Math_ILogB; } else if (strcmp(methodName, "Log") == 0) { result = NI_System_Math_Log; } else if (strcmp(methodName, "Log2") == 0) { result = NI_System_Math_Log2; } else if (strcmp(methodName, "Log10") == 0) { result = 
NI_System_Math_Log10; } else if (strcmp(methodName, "Max") == 0) { result = NI_System_Math_Max; } else if (strcmp(methodName, "Min") == 0) { result = NI_System_Math_Min; } else if (strcmp(methodName, "Pow") == 0) { result = NI_System_Math_Pow; } else if (strcmp(methodName, "Round") == 0) { result = NI_System_Math_Round; } else if (strcmp(methodName, "Sin") == 0) { result = NI_System_Math_Sin; } else if (strcmp(methodName, "Sinh") == 0) { result = NI_System_Math_Sinh; } else if (strcmp(methodName, "Sqrt") == 0) { result = NI_System_Math_Sqrt; } else if (strcmp(methodName, "Tan") == 0) { result = NI_System_Math_Tan; } else if (strcmp(methodName, "Tanh") == 0) { result = NI_System_Math_Tanh; } else if (strcmp(methodName, "Truncate") == 0) { result = NI_System_Math_Truncate; } } else if (strcmp(className, "GC") == 0) { if (strcmp(methodName, "KeepAlive") == 0) { result = NI_System_GC_KeepAlive; } } else if (strcmp(className, "Array") == 0) { if (strcmp(methodName, "Clone") == 0) { result = NI_System_Array_Clone; } else if (strcmp(methodName, "GetLength") == 0) { result = NI_System_Array_GetLength; } else if (strcmp(methodName, "GetLowerBound") == 0) { result = NI_System_Array_GetLowerBound; } else if (strcmp(methodName, "GetUpperBound") == 0) { result = NI_System_Array_GetUpperBound; } } else if (strcmp(className, "Object") == 0) { if (strcmp(methodName, "MemberwiseClone") == 0) { result = NI_System_Object_MemberwiseClone; } else if (strcmp(methodName, "GetType") == 0) { result = NI_System_Object_GetType; } else if (strcmp(methodName, "MethodTableOf") == 0) { result = NI_System_Object_MethodTableOf; } } else if (strcmp(className, "RuntimeTypeHandle") == 0) { if (strcmp(methodName, "GetValueInternal") == 0) { result = NI_System_RuntimeTypeHandle_GetValueInternal; } } else if (strcmp(className, "Type") == 0) { if (strcmp(methodName, "get_IsValueType") == 0) { result = NI_System_Type_get_IsValueType; } else if (strcmp(methodName, "IsAssignableFrom") == 0) { result = NI_System_Type_IsAssignableFrom; } else if (strcmp(methodName, "IsAssignableTo") == 0) { result = NI_System_Type_IsAssignableTo; } else if (strcmp(methodName, "op_Equality") == 0) { result = NI_System_Type_op_Equality; } else if (strcmp(methodName, "op_Inequality") == 0) { result = NI_System_Type_op_Inequality; } else if (strcmp(methodName, "GetTypeFromHandle") == 0) { result = NI_System_Type_GetTypeFromHandle; } } else if (strcmp(className, "String") == 0) { if (strcmp(methodName, "Equals") == 0) { result = NI_System_String_Equals; } else if (strcmp(methodName, "get_Chars") == 0) { result = NI_System_String_get_Chars; } else if (strcmp(methodName, "get_Length") == 0) { result = NI_System_String_get_Length; } else if (strcmp(methodName, "op_Implicit") == 0) { result = NI_System_String_op_Implicit; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_String_StartsWith; } } else if (strcmp(className, "MemoryExtensions") == 0) { if (strcmp(methodName, "AsSpan") == 0) { result = NI_System_MemoryExtensions_AsSpan; } if (strcmp(methodName, "SequenceEqual") == 0) { result = NI_System_MemoryExtensions_SequenceEqual; } else if (strcmp(methodName, "Equals") == 0) { result = NI_System_MemoryExtensions_Equals; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_MemoryExtensions_StartsWith; } } else if (strcmp(className, "Span`1") == 0) { if (strcmp(methodName, "get_Item") == 0) { result = NI_System_Span_get_Item; } } else if (strcmp(className, "ReadOnlySpan`1") == 0) { if (strcmp(methodName, "get_Item") == 0) 
{ result = NI_System_ReadOnlySpan_get_Item; } } else if (strcmp(className, "EETypePtr") == 0) { if (strcmp(methodName, "EETypePtrOf") == 0) { result = NI_System_EETypePtr_EETypePtrOf; } } } else if (strcmp(namespaceName, "System.Threading") == 0) { if (strcmp(className, "Thread") == 0) { if (strcmp(methodName, "get_CurrentThread") == 0) { result = NI_System_Threading_Thread_get_CurrentThread; } else if (strcmp(methodName, "get_ManagedThreadId") == 0) { result = NI_System_Threading_Thread_get_ManagedThreadId; } } else if (strcmp(className, "Interlocked") == 0) { #ifndef TARGET_ARM64 // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). if (strcmp(methodName, "And") == 0) { result = NI_System_Threading_Interlocked_And; } else if (strcmp(methodName, "Or") == 0) { result = NI_System_Threading_Interlocked_Or; } #endif if (strcmp(methodName, "CompareExchange") == 0) { result = NI_System_Threading_Interlocked_CompareExchange; } else if (strcmp(methodName, "Exchange") == 0) { result = NI_System_Threading_Interlocked_Exchange; } else if (strcmp(methodName, "ExchangeAdd") == 0) { result = NI_System_Threading_Interlocked_ExchangeAdd; } else if (strcmp(methodName, "MemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_MemoryBarrier; } else if (strcmp(methodName, "ReadMemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_ReadMemoryBarrier; } } } #if defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Buffers.Binary") == 0) { if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0)) { result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness; } } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Collections.Generic") == 0) { if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_EqualityComparer_get_Default; } else if ((strcmp(className, "Comparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_Comparer_get_Default; } } else if ((strcmp(namespaceName, "System.Numerics") == 0) && (strcmp(className, "BitOperations") == 0)) { if (strcmp(methodName, "PopCount") == 0) { result = NI_System_Numerics_BitOperations_PopCount; } } #ifdef FEATURE_HW_INTRINSICS else if (strcmp(namespaceName, "System.Numerics") == 0) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); int sizeOfVectorT = getSIMDVectorRegisterByteLength(); result = SimdAsHWIntrinsicInfo::lookupId(&sig, className, methodName, enclosingClassName, sizeOfVectorT); } #endif // FEATURE_HW_INTRINSICS else if ((strcmp(namespaceName, "System.Runtime.CompilerServices") == 0) && (strcmp(className, "RuntimeHelpers") == 0)) { if (strcmp(methodName, "CreateSpan") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan; } else if (strcmp(methodName, "InitializeArray") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray; } else if (strcmp(methodName, "IsKnownConstant") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant; } } else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0) { // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled // so we can specially handle IsSupported and recursive calls. // This is required to appropriately handle the intrinsics on platforms // which don't support them. 
On such a platform methods like Vector64.Create // will be seen as `Intrinsic` and `mustExpand` due to having a code path // which is recursive. When such a path is hit we expect it to be handled by // the importer and we fire an assert if it wasn't and in previous versions // of the JIT would fail fast. This was changed to throw a PNSE instead but // we still assert as most intrinsics should have been recognized/handled. // In order to avoid the assert, we specially handle the IsSupported checks // (to better allow dead-code optimizations) and we explicitly throw a PNSE // as we know that is the desired behavior for the HWIntrinsics when not // supported. For cases like Vector64.Create, this is fine because it will // be behind a relevant IsSupported check and will never be hit and the // software fallback will be executed instead. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef FEATURE_HW_INTRINSICS namespaceName += 25; const char* platformNamespaceName; #if defined(TARGET_XARCH) platformNamespaceName = ".X86"; #elif defined(TARGET_ARM64) platformNamespaceName = ".Arm"; #else #error Unsupported platform #endif if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0)) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); } #endif // FEATURE_HW_INTRINSICS if (result == NI_Illegal) { if ((strcmp(methodName, "get_IsSupported") == 0) || (strcmp(methodName, "get_IsHardwareAccelerated") == 0)) { // This allows the relevant code paths to be dropped as dead code even // on platforms where FEATURE_HW_INTRINSICS is not supported. result = NI_IsSupported_False; } else if (gtIsRecursiveCall(method)) { // For the framework itself, any recursive intrinsics will either be // only supported on a single platform or will be guarded by a relevant // IsSupported check so the throw PNSE will be valid or dropped. result = NI_Throw_PlatformNotSupportedException; } } } else if (strcmp(namespaceName, "System.StubHelpers") == 0) { if (strcmp(className, "StubHelpers") == 0) { if (strcmp(methodName, "GetStubContext") == 0) { result = NI_System_StubHelpers_GetStubContext; } else if (strcmp(methodName, "NextCallReturnAddress") == 0) { result = NI_System_StubHelpers_NextCallReturnAddress; } } } if (result == NI_Illegal) { JITDUMP("Not recognized\n"); } else if (result == NI_IsSupported_False) { JITDUMP("Unsupported - return false"); } else if (result == NI_Throw_PlatformNotSupportedException) { JITDUMP("Unsupported - throw PlatformNotSupportedException"); } else { JITDUMP("Recognized\n"); } return result; } //------------------------------------------------------------------------ // impUnsupportedNamedIntrinsic: Throws an exception for an unsupported named intrinsic // // Arguments: // helper - JIT helper ID for the exception to be thrown // method - method handle of the intrinsic function. // sig - signature of the intrinsic call // mustExpand - true if the intrinsic must return a GenTree*; otherwise, false // // Return Value: // a gtNewMustThrowException if mustExpand is true; otherwise, nullptr // GenTree* Compiler::impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand) { // We've hit some error case and may need to return a node for the given error. // // When `mustExpand=false`, we are attempting to inline the intrinsic directly into another method. 
In this // scenario, we need to return `nullptr` so that a GT_CALL to the intrinsic is emitted instead. This is to // ensure that everything continues to behave correctly when optimizations are enabled (e.g. things like the // inliner may expect the node we return to have a certain signature, and the `MustThrowException` node won't // match that). // // When `mustExpand=true`, we are in a GT_CALL to the intrinsic and are attempting to JIT it. This will generally // be in response to an indirect call (e.g. done via reflection) or in response to an earlier attempt returning // `nullptr` (under `mustExpand=false`). In that scenario, we are safe to return the `MustThrowException` node. if (mustExpand) { for (unsigned i = 0; i < sig->numArgs; i++) { impPopStack(); } return gtNewMustThrowException(helper, JITtype2varType(sig->retType), sig->retTypeClass); } else { return nullptr; } } /*****************************************************************************/ GenTree* Compiler::impArrayAccessIntrinsic( CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName) { /* If we are generating SMALL_CODE, we don't want to use intrinsics for the following, as it generates fatter code. */ if (compCodeOpt() == SMALL_CODE) { return nullptr; } /* These intrinsics generate fatter (but faster) code and are only done if we don't need SMALL_CODE */ unsigned rank = (intrinsicName == NI_Array_Set) ? (sig->numArgs - 1) : sig->numArgs; // The rank 1 case is special because it has to handle two array formats // we will simply not do that case if (rank > GT_ARR_MAX_RANK || rank <= 1) { return nullptr; } CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr; var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd)); // For the ref case, we will only be able to inline if the types match // (verifier checks for this, we don't care for the nonverified case and the // type is final (so we don't need to do the cast) if ((intrinsicName != NI_Array_Get) && !readonlyCall && varTypeIsGC(elemType)) { // Get the call site signature CORINFO_SIG_INFO LocalSig; eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig); assert(LocalSig.hasThis()); CORINFO_CLASS_HANDLE actualElemClsHnd; if (intrinsicName == NI_Array_Set) { // Fetch the last argument, the one that indicates the type we are setting. CORINFO_ARG_LIST_HANDLE argType = LocalSig.args; for (unsigned r = 0; r < rank; r++) { argType = info.compCompHnd->getArgNext(argType); } typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType); actualElemClsHnd = argInfo.GetClassHandle(); } else { assert(intrinsicName == NI_Array_Address); // Fetch the return type typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass); assert(retInfo.IsByRef()); actualElemClsHnd = retInfo.GetClassHandle(); } // if it's not final, we can't do the optimization if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL)) { return nullptr; } } unsigned arrayElemSize; if (elemType == TYP_STRUCT) { assert(arrElemClsHnd); arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd); } else { arrayElemSize = genTypeSize(elemType); } if ((unsigned char)arrayElemSize != arrayElemSize) { // arrayElemSize would be truncated as an unsigned char. // This means the array element is too large. Don't do the optimization. 
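        // e.g. (illustrative) a struct element larger than 255 bytes cannot be encoded in
        // GT_ARR_ELEM's byte-sized element-size field, so the access stays a call.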
return nullptr; } GenTree* val = nullptr; if (intrinsicName == NI_Array_Set) { // Assignment of a struct is more work, and there are more gets than sets. if (elemType == TYP_STRUCT) { return nullptr; } val = impPopStack().val; assert(genActualType(elemType) == genActualType(val->gtType) || (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) || (elemType == TYP_INT && val->gtType == TYP_BYREF) || (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT)); } noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK); GenTree* inds[GT_ARR_MAX_RANK]; for (unsigned k = rank; k > 0; k--) { inds[k - 1] = impPopStack().val; } GenTree* arr = impPopStack().val; assert(arr->gtType == TYP_REF); GenTree* arrElem = new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank), static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]); if (intrinsicName != NI_Array_Address) { if (varTypeIsStruct(elemType)) { arrElem = gtNewObjNode(sig->retTypeClass, arrElem); } else { arrElem = gtNewOperNode(GT_IND, elemType, arrElem); } } if (intrinsicName == NI_Array_Set) { assert(val != nullptr); return gtNewAssignNode(arrElem, val); } else { return arrElem; } } //------------------------------------------------------------------------ // impKeepAliveIntrinsic: Import the GC.KeepAlive intrinsic call // // Imports the intrinsic as a GT_KEEPALIVE node, and, as an optimization, // if the object to keep alive is a GT_BOX, removes its side effects and // uses the address of a local (copied from the box's source if needed) // as the operand for GT_KEEPALIVE. For the BOX optimization, if the class // of the box has no GC fields, a GT_NOP is returned. // // Arguments: // objToKeepAlive - the intrinisic call's argument // // Return Value: // The imported GT_KEEPALIVE or GT_NOP - see description. 
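//
//    For example (illustrative): GC.KeepAlive((object)localStruct) can become
//    KEEPALIVE(ADDR(LCL_VAR Vnn)) with the box removed, and a box whose class has
//    no GC fields becomes a GT_NOP.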
// GenTree* Compiler::impKeepAliveIntrinsic(GenTree* objToKeepAlive) { assert(objToKeepAlive->TypeIs(TYP_REF)); if (opts.OptimizationEnabled() && objToKeepAlive->IsBoxedValue()) { CORINFO_CLASS_HANDLE boxedClass = lvaGetDesc(objToKeepAlive->AsBox()->BoxOp()->AsLclVar())->lvClassHnd; ClassLayout* layout = typGetObjLayout(boxedClass); if (!layout->HasGCPtr()) { gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_AND_NARROW); JITDUMP("\nBOX class has no GC fields, KEEPALIVE is a NOP"); return gtNewNothingNode(); } GenTree* boxSrc = gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_BUT_NOT_NARROW); if (boxSrc != nullptr) { unsigned boxTempNum; if (boxSrc->OperIs(GT_LCL_VAR)) { boxTempNum = boxSrc->AsLclVarCommon()->GetLclNum(); } else { boxTempNum = lvaGrabTemp(true DEBUGARG("Temp for the box source")); GenTree* boxTempAsg = gtNewTempAssign(boxTempNum, boxSrc); Statement* boxAsgStmt = objToKeepAlive->AsBox()->gtCopyStmtWhenInlinedBoxValue; boxAsgStmt->SetRootNode(boxTempAsg); } JITDUMP("\nImporting KEEPALIVE(BOX) as KEEPALIVE(ADDR(LCL_VAR V%02u))", boxTempNum); GenTree* boxTemp = gtNewLclvNode(boxTempNum, boxSrc->TypeGet()); GenTree* boxTempAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, boxTemp); return gtNewKeepAliveNode(boxTempAddr); } } return gtNewKeepAliveNode(objToKeepAlive); } bool Compiler::verMergeEntryStates(BasicBlock* block, bool* changed) { unsigned i; // do some basic checks first if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth) { return false; } if (verCurrentState.esStackDepth > 0) { // merge stack types StackEntry* parentStack = block->bbStackOnEntry(); StackEntry* childStack = verCurrentState.esStack; for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++) { if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == false) { return false; } } } // merge initialization status of this ptr if (verTrackObjCtorInitState) { // If we're tracking the CtorInitState, then it must not be unknown in the current state. assert(verCurrentState.thisInitialized != TIS_Bottom); // If the successor block's thisInit state is unknown, copy it from the current state. if (block->bbThisOnEntry() == TIS_Bottom) { *changed = true; verSetThisInit(block, verCurrentState.thisInitialized); } else if (verCurrentState.thisInitialized != block->bbThisOnEntry()) { if (block->bbThisOnEntry() != TIS_Top) { *changed = true; verSetThisInit(block, TIS_Top); if (block->bbFlags & BBF_FAILED_VERIFICATION) { // The block is bad. Control can flow through the block to any handler that catches the // verification exception, but the importer ignores bad blocks and therefore won't model // this flow in the normal way. To complete the merge into the bad block, the new state // needs to be manually pushed to the handlers that may be reached after the verification // exception occurs. // // Usually, the new state was already propagated to the relevant handlers while processing // the predecessors of the bad block. The exception is when the bad block is at the start // of a try region, meaning it is protected by additional handlers that do not protect its // predecessors. // if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0)) { // Push TIS_Top to the handlers that protect the bad block. Note that this can cause // recursive calls back into this code path (if successors of the current bad block are // also bad blocks). 
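                        // Roughly: save the current state, force it to TIS_Top while pushing
                        // the state to the handlers protecting this block, then restore it
                        // (see the code just below).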
// ThisInitState origTIS = verCurrentState.thisInitialized; verCurrentState.thisInitialized = TIS_Top; impVerifyEHBlock(block, true); verCurrentState.thisInitialized = origTIS; } } } } } else { assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom); } return true; } /***************************************************************************** * 'logMsg' is true if a log message needs to be logged. false if the caller has * already logged it (presumably in a more detailed fashion than done here) */ void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) { block->bbJumpKind = BBJ_THROW; block->bbFlags |= BBF_FAILED_VERIFICATION; block->bbFlags &= ~BBF_IMPORTED; impCurStmtOffsSet(block->bbCodeOffs); // Clear the statement list as it exists so far; we're only going to have a verification exception. impStmtList = impLastStmt = nullptr; #ifdef DEBUG if (logMsg) { JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName, block->bbCodeOffs, block->bbCodeOffsEnd)); if (verbose) { printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs); } } if (JitConfig.DebugBreakOnVerificationFailure()) { DebugBreak(); } #endif impBeginTreeList(); // if the stack is non-empty evaluate all the side-effects if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); GenTree* op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewCallArgs(gtNewIconNode(block->bbCodeOffs))); // verCurrentState.esStackDepth = 0; impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // The inliner is not able to handle methods that require throw block, so // make sure this methods never gets inlined. 
info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE); } /***************************************************************************** * */ void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)) { verResetCurrentState(block, &verCurrentState); verConvertBBToThrowVerificationException(block DEBUGARG(logMsg)); #ifdef DEBUG impNoteLastILoffs(); // Remember at which BC offset the tree was finished #endif // DEBUG } /******************************************************************************/ typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd) { assert(ciType < CORINFO_TYPE_COUNT); typeInfo tiResult; switch (ciType) { case CORINFO_TYPE_STRING: case CORINFO_TYPE_CLASS: tiResult = verMakeTypeInfo(clsHnd); if (!tiResult.IsType(TI_REF)) { // type must be consistent with element type return typeInfo(); } break; #ifdef TARGET_64BIT case CORINFO_TYPE_NATIVEINT: case CORINFO_TYPE_NATIVEUINT: if (clsHnd) { // If we have more precise information, use it return verMakeTypeInfo(clsHnd); } else { return typeInfo::nativeInt(); } break; #endif // TARGET_64BIT case CORINFO_TYPE_VALUECLASS: case CORINFO_TYPE_REFANY: tiResult = verMakeTypeInfo(clsHnd); // type must be constant with element type; if (!tiResult.IsValueClass()) { return typeInfo(); } break; case CORINFO_TYPE_VAR: return verMakeTypeInfo(clsHnd); case CORINFO_TYPE_PTR: // for now, pointers are treated as an error case CORINFO_TYPE_VOID: return typeInfo(); break; case CORINFO_TYPE_BYREF: { CORINFO_CLASS_HANDLE childClassHandle; CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle); return ByRef(verMakeTypeInfo(childType, childClassHandle)); } break; default: if (clsHnd) { // If we have more precise information, use it return typeInfo(TI_STRUCT, clsHnd); } else { return typeInfo(JITtype2tiType(ciType)); } } return tiResult; } /******************************************************************************/ typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */) { if (clsHnd == nullptr) { return typeInfo(); } // Byrefs should only occur in method and local signatures, which are accessed // using ICorClassInfo and ICorClassInfo.getChildType. // So findClass() and getClassAttribs() should not be called for byrefs if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF) { assert(!"Did findClass() return a Byref?"); return typeInfo(); } unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd); if (attribs & CORINFO_FLG_VALUECLASS) { CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd); // Meta-data validation should ensure that CORINF_TYPE_BYREF should // not occur here, so we may want to change this to an assert instead. if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR) { return typeInfo(); } #ifdef TARGET_64BIT if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT) { return typeInfo::nativeInt(); } #endif // TARGET_64BIT if (t != CORINFO_TYPE_UNDEF) { return (typeInfo(JITtype2tiType(t))); } else if (bashStructToRef) { return (typeInfo(TI_REF, clsHnd)); } else { return (typeInfo(TI_STRUCT, clsHnd)); } } else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE) { // See comment in _typeInfo.h for why we do it this way. 
return (typeInfo(TI_REF, clsHnd, true)); } else { return (typeInfo(TI_REF, clsHnd)); } } /******************************************************************************/ bool Compiler::verIsSDArray(const typeInfo& ti) { if (ti.IsNullObjRef()) { // nulls are SD arrays return true; } if (!ti.IsType(TI_REF)) { return false; } if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef())) { return false; } return true; } /******************************************************************************/ /* Given 'arrayObjectType' which is an array type, fetch the element type. */ /* Returns an error type if anything goes wrong */ typeInfo Compiler::verGetArrayElemType(const typeInfo& arrayObjectType) { assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case if (!verIsSDArray(arrayObjectType)) { return typeInfo(); } CORINFO_CLASS_HANDLE childClassHandle = nullptr; CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle); return verMakeTypeInfo(ciType, childClassHandle); } /***************************************************************************** */ typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args) { CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle)); var_types type = JITtype2varType(ciType); if (varTypeIsGC(type)) { // For efficiency, getArgType only returns something in classHandle for // value types. For other types that have addition type info, you // have to call back explicitly classHandle = info.compCompHnd->getArgClass(sig, args); if (!classHandle) { NO_WAY("Could not figure out Class specified in argument or local signature"); } } return verMakeTypeInfo(ciType, classHandle); } bool Compiler::verIsByRefLike(const typeInfo& ti) { if (ti.IsByRef()) { return true; } if (!ti.IsType(TI_STRUCT)) { return false; } return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE; } bool Compiler::verIsSafeToReturnByRef(const typeInfo& ti) { if (ti.IsPermanentHomeByRef()) { return true; } else { return false; } } bool Compiler::verIsBoxable(const typeInfo& ti) { return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables || ti.IsUnboxedGenericTypeVar() || (ti.IsType(TI_STRUCT) && // exclude byreflike structs !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE))); } // Is it a boxed value type? bool Compiler::verIsBoxedValueType(const typeInfo& ti) { if (ti.GetType() == TI_REF) { CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef(); return !!eeIsValueClass(clsHnd); } else { return false; } } /***************************************************************************** * * Check if a TailCall is legal. */ bool Compiler::verCheckTailCallConstraint( OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter? bool speculative // If true, won't throw if verificatoin fails. Instead it will // return false to the caller. // If false, it will throw. 
) { DWORD mflags; CORINFO_SIG_INFO sig; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped CORINFO_METHOD_HANDLE methodHnd = nullptr; CORINFO_CLASS_HANDLE methodClassHnd = nullptr; unsigned methodClassFlgs = 0; assert(impOpcodeIsCallOpcode(opcode)); if (compIsForInlining()) { return false; } // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { /* Get the call sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; } else { methodHnd = pResolvedToken->hMethod; mflags = info.compCompHnd->getMethodAttribs(methodHnd); // When verifying generic code we pair the method handle with its // owning class to get the exact method signature. methodClassHnd = pResolvedToken->hClass; assert(methodClassHnd); eeGetMethodSig(methodHnd, &sig, methodClassHnd); // opcode specific check methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd); } // We must have got the methodClassHnd if opcode is not CEE_CALLI assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI); if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } // check compatibility of the arguments unsigned int argCount; argCount = sig.numArgs; CORINFO_ARG_LIST_HANDLE args; args = sig.args; while (argCount--) { typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack(); // check that the argument is not a byref for tailcalls VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative); // For unsafe code, we might have parameters containing pointer to the stack location. // Disallow the tailcall for this kind. CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle)); VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative); args = info.compCompHnd->getArgNext(args); } // update popCount popCount += sig.numArgs; // check for 'this' which is on non-static methods, not called via NEWOBJ if (!(mflags & CORINFO_FLG_STATIC)) { // Always update the popCount. // This is crucial for the stack calculation to be correct. typeInfo tiThis = impStackTop(popCount).seTypeInfo; popCount++; if (opcode == CEE_CALLI) { // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object // on the stack. 
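            // A value class "this" can only reach the callee by address, so model it as a
            // byref here; the byref-in-tailcall check below will then reject it.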
if (tiThis.IsValueClass()) { tiThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative); } else { // Check type compatibility of the this argument typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative); } } // Tail calls on constrained calls should be illegal too: // when instantiated at a value type, a constrained call may pass the address of a stack allocated value VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative); // Get the exact view of the signature for an array method if (sig.retType != CORINFO_TYPE_VOID) { if (methodClassFlgs & CORINFO_FLG_ARRAY) { assert(opcode != CEE_CALLI); eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } } typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass); typeInfo tiCallerRetType = verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass); // void return type gets morphed into the error type, so we have to treat them specially here if (sig.retType == CORINFO_TYPE_VOID) { VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch", speculative); } else { VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType), NormaliseForStack(tiCallerRetType), true), "tailcall return mismatch", speculative); } // for tailcall, stack must be empty VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative); return true; // Yes, tailcall is legal } /***************************************************************************** * * Checks the IL verification rules for the call */ void Compiler::verVerifyCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, bool tailCall, bool readonlyCall, const BYTE* delegateCreateStart, const BYTE* codeAddr, CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName)) { DWORD mflags; CORINFO_SIG_INFO* sig = nullptr; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { Verify(false, "Calli not verifiable"); return; } //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item. 
mflags = callInfo->verMethodFlags; sig = &callInfo->verSig; if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); } // opcode specific check unsigned methodClassFlgs = callInfo->classFlags; switch (opcode) { case CEE_CALLVIRT: // cannot do callvirt on valuetypes VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class"); VerifyOrReturn(sig->hasThis(), "CallVirt on static method"); break; case CEE_NEWOBJ: { assert(!tailCall); // Importer should not allow this VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC), "newobj must be on instance"); if (methodClassFlgs & CORINFO_FLG_DELEGATE) { VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor"); typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack(); typeInfo tiDeclaredFtn = verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack(); VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type"); assert(popCount == 0); typeInfo tiActualObj = impStackTop(1).seTypeInfo; typeInfo tiActualFtn = impStackTop(0).seTypeInfo; VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg"); VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch"); VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF), "delegate object type mismatch"); CORINFO_CLASS_HANDLE objTypeHandle = tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef(); // the method signature must be compatible with the delegate's invoke method // check that for virtual functions, the type of the object used to get the // ftn ptr is the same as the type of the object passed to the delegate ctor. // since this is a bit of work to determine in general, we pattern match stylized // code sequences // the delegate creation code check, which used to be done later, is now done here // so we can read delegateMethodRef directly from // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence; // we then use it in our call to isCompatibleDelegate(). 
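                // As an illustrative IL sketch (type and method names are hypothetical), the
                // expected closed instance delegate creation sequence is:
                //    ldarg.0
                //    ldftn  instance void C::M()      // or: dup; ldvirtftn instance void C::M()
                //    newobj instance void D::.ctor(object, native int)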
mdMemberRef delegateMethodRef = mdMemberRefNil; VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef), "must create delegates with certain IL"); CORINFO_RESOLVED_TOKEN delegateResolvedToken; delegateResolvedToken.tokenContext = impTokenLookupContextHandle; delegateResolvedToken.tokenScope = info.compScopeHnd; delegateResolvedToken.token = delegateMethodRef; delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method; info.compCompHnd->resolveToken(&delegateResolvedToken); CORINFO_CALL_INFO delegateCallInfo; eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */, CORINFO_CALLINFO_SECURITYCHECKS, &delegateCallInfo); bool isOpenDelegate = false; VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass, tiActualFtn.GetMethod(), pResolvedToken->hClass, &isOpenDelegate), "function incompatible with delegate"); // check the constraints on the target method VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass), "delegate target has unsatisfied class constraints"); VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass, tiActualFtn.GetMethod()), "delegate target has unsatisfied method constraints"); // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch) // for additional verification rules for delegates CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod(); DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle); if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)) { VerifyOrReturn((tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiActualObj), "The 'this' parameter to the call must be either the calling method's " "'this' parameter or " "a boxed value type."); } } if (actualMethodAttribs & CORINFO_FLG_PROTECTED) { bool targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC; Verify(targetIsStatic || !isOpenDelegate, "Unverifiable creation of an open instance delegate for a protected member."); CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic) ? info.compClassHnd : tiActualObj.GetClassHandleForObjRef(); // In the case of protected methods, it is a requirement that the 'this' // pointer be a subclass of the current context. Perform this check. 
Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd), "Accessing protected method through wrong type."); } goto DONE_ARGS; } } // fall thru to default checks FALLTHROUGH; default: VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract"); } VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)), "can only newobj a delegate constructor"); // check compatibility of the arguments unsigned int argCount; argCount = sig->numArgs; CORINFO_ARG_LIST_HANDLE args; args = sig->args; while (argCount--) { typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo; typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack(); VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch"); args = info.compCompHnd->getArgNext(args); } DONE_ARGS: // update popCount popCount += sig->numArgs; // check for 'this' which are is non-static methods, not called via NEWOBJ CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd; if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ)) { typeInfo tiThis = impStackTop(popCount).seTypeInfo; popCount++; // If it is null, we assume we can access it (since it will AV shortly) // If it is anything but a reference class, there is no hierarchy, so // again, we don't need the precise instance class to compute 'protected' access if (tiThis.IsType(TI_REF)) { instanceClassHnd = tiThis.GetClassHandleForObjRef(); } // Check type compatibility of the this argument typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); } // If this is a call to the base class .ctor, set thisPtr Init for // this block. if (mflags & CORINFO_FLG_CONSTRUCTOR) { if (verTrackObjCtorInitState && tiThis.IsThisPtr() && verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass)) { assert(verCurrentState.thisInitialized != TIS_Bottom); // This should never be the case just from the logic of the verifier. VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit, "Call to base class constructor when 'this' is possibly initialized"); // Otherwise, 'this' is now initialized. verCurrentState.thisInitialized = TIS_Init; tiThis.SetInitialisedObjRef(); } else { // We allow direct calls to value type constructors // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a // constrained callvirt to illegally re-enter a .ctor on a value of reference type. 
VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(), "Bad call to a constructor"); } } if (pConstrainedResolvedToken != nullptr) { VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call"); typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass); // We just dereference this and test for equality tiThis.DereferenceByRef(); VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint), "this type mismatch with constrained type operand"); // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass); } // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef()) { tiDeclaredThis.SetIsReadonlyByRef(); } VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch"); if (tiThis.IsByRef()) { // Find the actual type where the method exists (as opposed to what is declared // in the metadata). This is to prevent passing a byref as the "this" argument // while calling methods like System.ValueType.GetHashCode() which expect boxed objects. CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod); VerifyOrReturn(eeIsValueClass(actualClassHnd), "Call to base type of valuetype (which is never a valuetype)"); } // Rules for non-virtual call to a non-final virtual method: // Define: // The "this" pointer is considered to be "possibly written" if // 1. Its address have been taken (LDARGA 0) anywhere in the method. // (or) // 2. It has been stored to (STARG.0) anywhere in the method. // A non-virtual call to a non-final virtual method is only allowed if // 1. The this pointer passed to the callee is an instance of a boxed value type. // (or) // 2. The this pointer passed to the callee is the current method's this pointer. // (and) The current method's this pointer is not "possibly written". // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to // virtual methods. (Luckily this does affect .ctors, since they are not virtual). // This is stronger that is strictly needed, but implementing a laxer rule is significantly // hard and more error prone. if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)) { VerifyOrReturn((tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiThis), "The 'this' parameter to the call must be either the calling method's 'this' parameter or " "a boxed value type."); } } // check any constraints on the callee's class and type parameters VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass), "method has unsatisfied class constraints"); VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod), "method has unsatisfied method constraints"); if (mflags & CORINFO_FLG_PROTECTED) { VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd), "Can't access protected method"); } // Get the exact view of the signature for an array method if (sig->retType != CORINFO_TYPE_VOID) { eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass); } // "readonly." prefixed calls only allowed for the Address operation on arrays. // The methods supported by array types are under the control of the EE // so we can trust that only the Address operation returns a byref. 
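    // For example (hypothetical signature), a prefixed call such as
    //    readonly. call instance int32& int32[,]::Address(int32, int32)
    // passes this check because the method's class is an array type and it returns a byref.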
if (readonlyCall) { typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass); VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(), "unexpected use of readonly prefix"); } // Verify the tailcall if (tailCall) { verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false); } } /***************************************************************************** * Checks that a delegate creation is done using the following pattern: * dup * ldvirtftn targetMemberRef * OR * ldftn targetMemberRef * * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if * not in this basic block) * * targetMemberRef is read from the code sequence. * targetMemberRef is validated iff verificationNeeded. */ bool Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef) { if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]); return true; } else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]); return true; } return false; } typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType) { Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref"); typeInfo ptrVal = verVerifyLDIND(tiTo, instrType); typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack(); if (!tiCompatibleWith(value, normPtrVal, true)) { Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch"); compUnsafeCastUsed = true; } return ptrVal; } typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType) { assert(!instrType.IsStruct()); typeInfo ptrVal; if (ptr.IsByRef()) { ptrVal = DereferenceByRef(ptr); if (instrType.IsObjRef() && !ptrVal.IsObjRef()) { Verify(false, "bad pointer"); compUnsafeCastUsed = true; } else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal)) { Verify(false, "pointer not consistent with instr"); compUnsafeCastUsed = true; } } else { Verify(false, "pointer not byref"); compUnsafeCastUsed = true; } return ptrVal; } // Verify that the field is used properly. 'tiThis' is NULL for statics, // 'fieldFlags' is the fields attributes, and mutator is true if it is a // ld*flda or a st*fld. // 'enclosingClass' is given if we are accessing a field in some specific type. void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, bool mutator, bool allowPlainStructAsThis) { CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass; unsigned fieldFlags = fieldInfo.fieldFlags; CORINFO_CLASS_HANDLE instanceClass = info.compClassHnd; // for statics, we imagine the instance is the current class. 
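    // (instanceClass only feeds the 'protected' accessibility check at the end of this
    // method, so the current class is a reasonable stand-in when there is no instance.)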
bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0); if (mutator) { Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA bases static"); if ((fieldFlags & CORINFO_FLG_FIELD_FINAL)) { Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd && info.compIsStatic == isStaticField, "bad use of initonly field (set or address taken)"); } } if (tiThis == nullptr) { Verify(isStaticField, "used static opcode with non-static field"); } else { typeInfo tThis = *tiThis; if (allowPlainStructAsThis && tThis.IsValueClass()) { tThis.MakeByRef(); } // If it is null, we assume we can access it (since it will AV shortly) // If it is anything but a refernce class, there is no hierarchy, so // again, we don't need the precise instance class to compute 'protected' access if (tiThis->IsType(TI_REF)) { instanceClass = tiThis->GetClassHandleForObjRef(); } // Note that even if the field is static, we require that the this pointer // satisfy the same constraints as a non-static field This happens to // be simpler and seems reasonable typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); // we allow read-only tThis, on any field access (even stores!), because if the // class implementor wants to prohibit stores he should make the field private. // we do this by setting the read-only bit on the type we compare tThis to. tiDeclaredThis.SetIsReadonlyByRef(); } else if (verTrackObjCtorInitState && tThis.IsThisPtr()) { // Any field access is legal on "uninitialized" this pointers. // The easiest way to implement this is to simply set the // initialized bit for the duration of the type check on the // field access only. It does not change the state of the "this" // for the function as a whole. Note that the "tThis" is a copy // of the original "this" type (*tiThis) passed in. tThis.SetInitialisedObjRef(); } Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch"); } // Presently the JIT does not check that we don't store or take the address of init-only fields // since we cannot guarantee their immutability and it is not a security issue. // check any constraints on the fields's class --- accessing the field might cause a class constructor to run. VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass), "field has unsatisfied class constraints"); if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED) { Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass), "Accessing protected method through wrong type."); } } void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode) { if (tiOp1.IsNumberType()) { #ifdef TARGET_64BIT Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch"); #else // TARGET_64BIT // [10/17/2013] Consider changing this: to put on my verification lawyer hat, // this is non-conforming to the ECMA Spec: types don't have to be equivalent, // but compatible, since we can coalesce native int with int32 (see section III.1.5). 
Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch"); #endif // !TARGET_64BIT } else if (tiOp1.IsObjRef()) { switch (opcode) { case CEE_BEQ_S: case CEE_BEQ: case CEE_BNE_UN_S: case CEE_BNE_UN: case CEE_CEQ: case CEE_CGT_UN: break; default: Verify(false, "Cond not allowed on object types"); } Verify(tiOp2.IsObjRef(), "Cond type mismatch"); } else if (tiOp1.IsByRef()) { Verify(tiOp2.IsByRef(), "Cond type mismatch"); } else { Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch"); } } void Compiler::verVerifyThisPtrInitialised() { if (verTrackObjCtorInitState) { Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized"); } } bool Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target) { // Either target == context, in this case calling an alternate .ctor // Or target is the immediate parent of context return ((target == context) || (target == info.compCompHnd->getParentType(context))); } GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } // CoreRT generic virtual method if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* runtimeMethodHandle = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_METHOD_HDL, pCallInfo->hMethod); return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL, gtNewCallArgs(thisPtr, runtimeMethodHandle)); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { if (!pCallInfo->exactContextNeedsRuntimeLookup) { GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewCallArgs(thisPtr)); call->setEntryPoint(pCallInfo->codePointerLookup.constLookup); return call; } // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too. if (IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind); return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pCallInfo->codePointerLookup.lookupKind); } } #endif // Get the exact descriptor for the static callsite GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken); if (exactTypeDesc == nullptr) { // compDonotInline() return nullptr; } GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken); if (exactMethodDesc == nullptr) { // compDonotInline() return nullptr; } GenTreeCall::Use* helpArgs = gtNewCallArgs(exactMethodDesc); helpArgs = gtPrependNewCallArg(exactTypeDesc, helpArgs); helpArgs = gtPrependNewCallArg(thisPtr, helpArgs); // Call helper function. This gets the target address of the final destination callsite. return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs); } //------------------------------------------------------------------------ // impBoxPatternMatch: match and import common box idioms // // Arguments: // pResolvedToken - resolved token from the box operation // codeAddr - position in IL stream after the box instruction // codeEndp - end of IL stream // // Return Value: // Number of IL bytes matched and imported, -1 otherwise // // Notes: // pResolvedToken is known to be a value type; ref type boxing // is handled in the CEE_BOX clause. 
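//
//    For example, for the common idiom
//        box      SomeStruct
//        brtrue.s SOME_LABEL
//    (SomeStruct and SOME_LABEL are placeholders) the boxed value feeding the branch is
//    imported as the constant 1, optionally under a null check of the source address,
//    since boxing a value type never produces a null reference.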
int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation) { if (codeAddr >= codeEndp) { return -1; } switch (codeAddr[0]) { case CEE_UNBOX_ANY: // box + unbox.any if (codeAddr + 1 + sizeof(mdToken) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } CORINFO_RESOLVED_TOKEN unboxResolvedToken; impResolveToken(codeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // See if the resolved tokens describe types that are equal. const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass); // If so, box/unbox.any is a nop. if (compare == TypeCompareState::Must) { JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n"); // Skip the next unbox.any instruction return 1 + sizeof(mdToken); } } break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: // box + br_true/false if ((codeAddr + ((codeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 0; } GenTree* const treeToBox = impStackTop().val; bool canOptimize = true; GenTree* treeToNullcheck = nullptr; // Can the thing being boxed cause a side effect? if ((treeToBox->gtFlags & GTF_SIDE_EFFECT) != 0) { // Is this a side effect we can replicate cheaply? if (((treeToBox->gtFlags & GTF_SIDE_EFFECT) == GTF_EXCEPT) && treeToBox->OperIs(GT_OBJ, GT_BLK, GT_IND)) { // Yes, we just need to perform a null check if needed. GenTree* const addr = treeToBox->AsOp()->gtGetOp1(); if (fgAddrCouldBeNull(addr)) { treeToNullcheck = addr; } } else { canOptimize = false; } } if (canOptimize) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { JITDUMP("\n Importing BOX; BR_TRUE/FALSE as %sconstant\n", treeToNullcheck == nullptr ? "" : "nullcheck+"); impPopStack(); GenTree* result = gtNewIconNode(1); if (treeToNullcheck != nullptr) { GenTree* nullcheck = gtNewNullCheck(treeToNullcheck, compCurBB); result = gtNewOperNode(GT_COMMA, TYP_INT, nullcheck, result); } impPushOnStack(result, typeInfo(TI_INT)); return 0; } } } break; case CEE_ISINST: if (codeAddr + 1 + sizeof(mdToken) + 1 <= codeEndp) { const BYTE* nextCodeAddr = codeAddr + 1 + sizeof(mdToken); switch (nextCodeAddr[0]) { // box + isinst + br_true/false case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: if ((nextCodeAddr + ((nextCodeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } if (!(impStackTop().val->gtFlags & GTF_SIDE_EFFECT)) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(pResolvedToken->hClass, isInstResolvedToken.hClass); if (castResult != TypeCompareState::May) { JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant\n"); impPopStack(); impPushOnStack(gtNewIconNode((castResult == TypeCompareState::Must) ? 
1 : 0), typeInfo(TI_INT)); // Skip the next isinst instruction return 1 + sizeof(mdToken); } } else if (boxHelper == CORINFO_HELP_BOX_NULLABLE) { // For nullable we're going to fold it to "ldfld hasValue + brtrue/brfalse" or // "ldc.i4.0 + brtrue/brfalse" in case if the underlying type is not castable to // the target type. CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); CORINFO_CLASS_HANDLE nullableCls = pResolvedToken->hClass; CORINFO_CLASS_HANDLE underlyingCls = info.compCompHnd->getTypeForBox(nullableCls); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(underlyingCls, isInstResolvedToken.hClass); if (castResult == TypeCompareState::Must) { const CORINFO_FIELD_HANDLE hasValueFldHnd = info.compCompHnd->getFieldInClass(nullableCls, 0); assert(info.compCompHnd->getFieldOffset(hasValueFldHnd) == 0); assert(!strcmp(info.compCompHnd->getFieldName(hasValueFldHnd, nullptr), "hasValue")); GenTree* objToBox = impPopStack().val; // Spill struct to get its address (to access hasValue field) objToBox = impGetStructAddr(objToBox, nullableCls, (unsigned)CHECK_SPILL_ALL, true); impPushOnStack(gtNewFieldRef(TYP_BOOL, hasValueFldHnd, objToBox, 0), typeInfo(TI_INT)); JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as nullableVT.hasValue\n"); return 1 + sizeof(mdToken); } else if (castResult == TypeCompareState::MustNot) { impPopStack(); impPushOnStack(gtNewIconNode(0), typeInfo(TI_INT)); JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant (false)\n"); return 1 + sizeof(mdToken); } } } } break; // box + isinst + unbox.any case CEE_UNBOX_ANY: if ((nextCodeAddr + 1 + sizeof(mdToken)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 2 + sizeof(mdToken) * 2; } // See if the resolved tokens in box, isinst and unbox.any describe types that are equal. CORINFO_RESOLVED_TOKEN isinstResolvedToken = {}; impResolveToken(codeAddr + 1, &isinstResolvedToken, CORINFO_TOKENKIND_Class); if (info.compCompHnd->compareTypesForEquality(isinstResolvedToken.hClass, pResolvedToken->hClass) == TypeCompareState::Must) { CORINFO_RESOLVED_TOKEN unboxResolvedToken = {}; impResolveToken(nextCodeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // If so, box + isinst + unbox.any is a nop. if (info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass) == TypeCompareState::Must) { JITDUMP("\n Importing BOX; ISINST, UNBOX.ANY as NOP\n"); return 2 + sizeof(mdToken) * 2; } } } break; } } break; default: break; } return -1; } //------------------------------------------------------------------------ // impImportAndPushBox: build and import a value-type box // // Arguments: // pResolvedToken - resolved token from the box operation // // Return Value: // None. // // Side Effects: // The value to be boxed is popped from the stack, and a tree for // the boxed value is pushed. This method may create upstream // statements, spill side effecting trees, and create new temps. // // If importing an inlinee, we may also discover the inline must // fail. If so there is no new value pushed on the stack. Callers // should use CompDoNotInline after calling this method to see if // ongoing importation should be aborted. // // Notes: // Boxing of ref classes results in the same value as the value on // the top of the stack, so is handled inline in impImportBlockCode // for the CEE_BOX case. Only value or primitive type boxes make it // here. 
// // Boxing for nullable types is done via a helper call; boxing // of other value types is expanded inline or handled via helper // call, depending on the jit's codegen mode. // // When the jit is operating in size and time constrained modes, // using a helper call here can save jit time and code size. But it // also may inhibit cleanup optimizations that could have also had a // even greater benefit effect on code size and jit time. An optimal // strategy may need to peek ahead and see if it is easy to tell how // the box is being used. For now, we defer. void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) { // Spill any special side effects impSpillSpecialSideEff(); // Get get the expression to box from the stack. GenTree* op1 = nullptr; GenTree* op2 = nullptr; StackEntry se = impPopStack(); CORINFO_CLASS_HANDLE operCls = se.seTypeInfo.GetClassHandle(); GenTree* exprToBox = se.val; // Look at what helper we should use. CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); // Determine what expansion to prefer. // // In size/time/debuggable constrained modes, the helper call // expansion for box is generally smaller and is preferred, unless // the value to box is a struct that comes from a call. In that // case the call can construct its return value directly into the // box payload, saving possibly some up-front zeroing. // // Currently primitive type boxes always get inline expanded. We may // want to do the same for small structs if they don't come from // calls and don't have GC pointers, since explicitly copying such // structs is cheap. JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via"); bool canExpandInline = (boxHelper == CORINFO_HELP_BOX); bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled(); bool expandInline = canExpandInline && !optForSize; if (expandInline) { JITDUMP(" inline allocate/copy sequence\n"); // we are doing 'normal' boxing. This means that we can inline the box operation // Box(expr) gets morphed into // temp = new(clsHnd) // cpobj(temp+4, expr, clsHnd) // push temp // The code paths differ slightly below for structs and primitives because // "cpobj" differs in these cases. In one case you get // impAssignStructPtr(temp+4, expr, clsHnd) // and the other you get // *(temp+4) = expr if (opts.OptimizationDisabled()) { // For minopts/debug code, try and minimize the total number // of box temps by reusing an existing temp when possible. if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM) { impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper")); } } else { // When optimizing, use a new temp for each box operation // since we then know the exact class of the box temp. impBoxTemp = lvaGrabTemp(true DEBUGARG("Single-def Box Helper")); lvaTable[impBoxTemp].lvType = TYP_REF; lvaTable[impBoxTemp].lvSingleDef = 1; JITDUMP("Marking V%02u as a single def local\n", impBoxTemp); const bool isExact = true; lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact); } // needs to stay in use until this box expression is appended // some other node. We approximate this by keeping it alive until // the opcode stack becomes empty impBoxTempInUse = true; // Remember the current last statement in case we need to move // a range of statements to ensure the box temp is initialized // before it's used. 
// Statement* const cursor = impLastStmt; const bool useParent = false; op1 = gtNewAllocObjNode(pResolvedToken, useParent); if (op1 == nullptr) { // If we fail to create the newobj node, we must be inlining // and have run across a type we can't describe. // assert(compDonotInline()); return; } // Remember that this basic block contains 'new' of an object, // and so does this method // compCurBB->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Assign the boxed object to the box temp. // GenTree* asg = gtNewTempAssign(impBoxTemp, op1); Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // If the exprToBox is a call that returns its value via a ret buf arg, // move the assignment statement(s) before the call (which must be a top level tree). // // We do this because impAssignStructPtr (invoked below) will // back-substitute into a call when it sees a GT_RET_EXPR and the call // has a hidden buffer pointer, So we need to reorder things to avoid // creating out-of-sequence IR. // if (varTypeIsStruct(exprToBox) && exprToBox->OperIs(GT_RET_EXPR)) { GenTreeCall* const call = exprToBox->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->HasRetBufArg()) { JITDUMP("Must insert newobj stmts for box before call [%06u]\n", dspTreeID(call)); // Walk back through the statements in this block, looking for the one // that has this call as the root node. // // Because gtNewTempAssign (above) may have added statements that // feed into the actual assignment we need to move this set of added // statements as a group. // // Note boxed allocations are side-effect free (no com or finalizer) so // our only worries here are (correctness) not overlapping the box temp // lifetime and (perf) stretching the temp lifetime across the inlinee // body. // // Since this is an inline candidate, we must be optimizing, and so we have // a unique box temp per call. So no worries about overlap. // assert(!opts.OptimizationDisabled()); // Lifetime stretching could addressed with some extra cleverness--sinking // the allocation back down to just before the copy, once we figure out // where the copy is. We defer for now. // Statement* insertBeforeStmt = cursor; noway_assert(insertBeforeStmt != nullptr); while (true) { if (insertBeforeStmt->GetRootNode() == call) { break; } // If we've searched all the statements in the block and failed to // find the call, then something's wrong. // noway_assert(insertBeforeStmt != impStmtList); insertBeforeStmt = insertBeforeStmt->GetPrevStmt(); } // Found the call. Move the statements comprising the assignment. // JITDUMP("Moving " FMT_STMT "..." FMT_STMT " before " FMT_STMT "\n", cursor->GetNextStmt()->GetID(), asgStmt->GetID(), insertBeforeStmt->GetID()); assert(asgStmt == impLastStmt); do { Statement* movingStmt = impExtractLastStmt(); impInsertStmtBefore(movingStmt, insertBeforeStmt); insertBeforeStmt = movingStmt; } while (impLastStmt != cursor); } } // Create a pointer to the box payload in op1. // op1 = gtNewLclvNode(impBoxTemp, TYP_REF); op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2); // Copy from the exprToBox to the box payload. 
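        // (Structs are copied via impAssignStructPtr below; primitives become a simple
        // indirect assignment through the payload address, with a normalizing cast when
        // the source and destination types differ.)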
// if (varTypeIsStruct(exprToBox)) { assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls)); op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL); } else { var_types lclTyp = exprToBox->TypeGet(); if (lclTyp == TYP_BYREF) { lclTyp = TYP_I_IMPL; } CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass); if (impIsPrimitive(jitType)) { lclTyp = JITtype2varType(jitType); } var_types srcTyp = exprToBox->TypeGet(); var_types dstTyp = lclTyp; // We allow float <-> double mismatches and implicit truncation for small types. assert((genActualType(srcTyp) == genActualType(dstTyp)) || (varTypeIsFloating(srcTyp) == varTypeIsFloating(dstTyp))); // Note regarding small types. // We are going to store to the box here via an indirection, so the cast added below is // redundant, since the store has an implicit truncation semantic. The reason we still // add this cast is so that the code which deals with GT_BOX optimizations does not have // to account for this implicit truncation (e. g. understand that BOX<byte>(0xFF + 1) is // actually BOX<byte>(0) or deal with signedness mismatch and other GT_CAST complexities). if (srcTyp != dstTyp) { exprToBox = gtNewCastNode(genActualType(dstTyp), exprToBox, false, dstTyp); } op1 = gtNewAssignNode(gtNewOperNode(GT_IND, dstTyp, op1), exprToBox); } // Spill eval stack to flush out any pending side effects. impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox")); // Set up this copy as a second assignment. Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); op1 = gtNewLclvNode(impBoxTemp, TYP_REF); // Record that this is a "box" node and keep track of the matching parts. op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt); // If it is a value class, mark the "box" node. We can use this information // to optimise several cases: // "box(x) == null" --> false // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod" // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod" op1->gtFlags |= GTF_BOX_VALUE; assert(op1->IsBoxedValue()); assert(asg->gtOper == GT_ASG); } else { // Don't optimize, just call the helper and be done with it. JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable"); assert(operCls != nullptr); // Ensure that the value class is restored op2 = impTokenToHandle(pResolvedToken, nullptr, true /* mustRestoreHandle */); if (op2 == nullptr) { // We must be backing out of an inline. assert(compDonotInline()); return; } GenTreeCall::Use* args = gtNewCallArgs(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true)); op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args); } /* Push the result back on the stack, */ /* even if clsHnd is a value class we want the TI_REF */ typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass)); impPushOnStack(op1, tiRetVal); } //------------------------------------------------------------------------ // impImportNewObjArray: Build and import `new` of multi-dimmensional array // // Arguments: // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized // by a call to CEEInfo::resolveToken(). // pCallInfo - The CORINFO_CALL_INFO that has been initialized // by a call to CEEInfo::getCallInfo(). // // Assumptions: // The multi-dimensional array constructor arguments (array dimensions) are // pushed on the IL stack on entry to this method. 
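//    For example, for "newobj instance void int32[,]::.ctor(int32, int32)" the two int32
//    dimensions are on the stack; they are stored into the shared lvaNewObjArrayArgs temp
//    and passed to the CORINFO_HELP_NEW_MDARR helper below.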
// // Notes: // Multi-dimensional array constructors are imported as calls to a JIT // helper, not as regular calls. void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken); if (classHandle == nullptr) { // compDonotInline() return; } assert(pCallInfo->sig.numArgs); GenTree* node; // Reuse the temp used to pass the array dimensions to avoid bloating // the stack frame in case there are multiple calls to multi-dim array // constructors within a single method. if (lvaNewObjArrayArgs == BAD_VAR_NUM) { lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs")); lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK; lvaTable[lvaNewObjArrayArgs].lvExactSize = 0; } // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers // for our call to CORINFO_HELP_NEW_MDARR. lvaTable[lvaNewObjArrayArgs].lvExactSize = max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32)); // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments // to one allocation at a time. impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray")); // // The arguments of the CORINFO_HELP_NEW_MDARR helper are: // - Array class handle // - Number of dimension arguments // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp. // node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK); node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node); // Pop dimension arguments from the stack one at a time and store it // into lvaNewObjArrayArgs temp. for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--) { GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT); GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK); dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest); dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest, new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i)); dest = gtNewOperNode(GT_IND, TYP_INT, dest); node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node); } GenTreeCall::Use* args = gtNewCallArgs(node); // pass number of arguments to the helper args = gtPrependNewCallArg(gtNewIconNode(pCallInfo->sig.numArgs), args); args = gtPrependNewCallArg(classHandle, args); node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args); for (GenTreeCall::Use& use : node->AsCall()->Args()) { node->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT; } node->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass; // Remember that this basic block contains 'new' of a md array compCurBB->bbFlags |= BBF_HAS_NEWARRAY; impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass)); } GenTree* Compiler::impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform) { switch (transform) { case CORINFO_DEREF_THIS: { GenTree* obj = thisPtr; // This does a LDIND on the obj, which should be a byref. 
            // pointing to a ref
            impBashVarAddrsToI(obj);
            assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
            CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);

            obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
            // ldind could point anywhere, example a boxed class static int
            obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);

            return obj;
        }

        case CORINFO_BOX_THIS:
        {
            // Constraint calls where there might be no
            // unboxed entry point require us to implement the call via helper.
            // These only occur when a possible target of the call
            // may have inherited an implementation of an interface
            // method from System.Object or System.ValueType.  The EE does not provide us with
            // "unboxed" versions of these methods.

            GenTree* obj = thisPtr;

            assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
            obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
            obj->gtFlags |= GTF_EXCEPT;

            CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
            if (impIsPrimitive(jitTyp))
            {
                if (obj->OperIsBlk())
                {
                    obj->ChangeOperUnchecked(GT_IND);

                    // Obj could point anywhere, example a boxed class static int
                    obj->gtFlags |= GTF_IND_TGTANYWHERE;
                    obj->AsOp()->gtOp2 = nullptr; // must be zero for tree walkers
                }

                obj->gtType = JITtype2varType(jitTyp);
                assert(varTypeIsArithmetic(obj->gtType));
            }

            // This pushes on the dereferenced byref
            // This is then used immediately to box.
            impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());

            // This pops off the byref-to-a-value-type remaining on the stack and
            // replaces it with a boxed object.
            // This is then used as the object to the virtual call immediately below.
            impImportAndPushBox(pConstrainedResolvedToken);
            if (compDonotInline())
            {
                return nullptr;
            }

            obj = impPopStack().val;
            return obj;
        }
        case CORINFO_NO_THIS_TRANSFORM:
        default:
            return thisPtr;
    }
}

//------------------------------------------------------------------------
// impCanPInvokeInline: check whether PInvoke inlining should be enabled in current method.
//
// Return Value:
//    true if PInvoke inlining should be enabled in current method, false otherwise
//
// Notes:
//    Checks a number of ambient conditions where we could pinvoke but choose not to

bool Compiler::impCanPInvokeInline()
{
    return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
           (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
        ;
}

//------------------------------------------------------------------------
// impCanPInvokeInlineCallSite: basic legality checks using information
// from a call to see if the call qualifies as an inline pinvoke.
//
// Arguments:
//    block      - block containing the call, or for inlinees, block
//                 containing the call being inlined
//
// Return Value:
//    true if this call can legally qualify as an inline pinvoke, false otherwise
//
// Notes:
//    For runtimes that support exception handling interop there are
//    restrictions on using inline pinvoke in handler regions.
//
//    * We have to disable pinvoke inlining inside of filters because
//    in case the main execution (i.e.
in the try block) is inside // unmanaged code, we cannot reuse the inlined stub (we still need // the original state until we are in the catch handler) // // * We disable pinvoke inlining inside handlers since the GSCookie // is in the inlined Frame (see // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but // this would not protect framelets/return-address of handlers. // // These restrictions are currently also in place for CoreCLR but // can be relaxed when coreclr/#8459 is addressed. bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block) { if (block->hasHndIndex()) { return false; } // The remaining limitations do not apply to CoreRT if (IsTargetAbi(CORINFO_CORERT_ABI)) { return true; } #ifdef TARGET_64BIT // On 64-bit platforms, we disable pinvoke inlining inside of try regions. // Note that this could be needed on other architectures too, but we // haven't done enough investigation to know for sure at this point. // // Here is the comment from JIT64 explaining why: // [VSWhidbey: 611015] - because the jitted code links in the // Frame (instead of the stub) we rely on the Frame not being // 'active' until inside the stub. This normally happens by the // stub setting the return address pointer in the Frame object // inside the stub. On a normal return, the return address // pointer is zeroed out so the Frame can be safely re-used, but // if an exception occurs, nobody zeros out the return address // pointer. Thus if we re-used the Frame object, it would go // 'active' as soon as we link it into the Frame chain. // // Technically we only need to disable PInvoke inlining if we're // in a handler or if we're in a try body with a catch or // filter/except where other non-handler code in this method // might run and try to re-use the dirty Frame object. // // A desktop test case where this seems to matter is // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe if (block->hasTryIndex()) { // This does not apply to the raw pinvoke call that is inside the pinvoke // ILStub. In this case, we have to inline the raw pinvoke call into the stub, // otherwise we would end up with a stub that recursively calls itself, and end // up with a stack overflow. if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers()) { return true; } return false; } #endif // TARGET_64BIT return true; } //------------------------------------------------------------------------ // impCheckForPInvokeCall examine call to see if it is a pinvoke and if so // if it can be expressed as an inline pinvoke. // // Arguments: // call - tree for the call // methHnd - handle for the method being called (may be null) // sig - signature of the method being called // mflags - method flags for the method being called // block - block contaning the call, or for inlinees, block // containing the call being inlined // // Notes: // Sets GTF_CALL_M_PINVOKE on the call for pinvokes. // // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the // call passes a combination of legality and profitabilty checks. 
// // If GTF_CALL_UNMANAGED is set, increments info.compUnmanagedCallCountWithGCTransition void Compiler::impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block) { CorInfoCallConvExtension unmanagedCallConv; // If VM flagged it as Pinvoke, flag the call node accordingly if ((mflags & CORINFO_FLG_PINVOKE) != 0) { call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE; } bool suppressGCTransition = false; if (methHnd) { if ((mflags & CORINFO_FLG_PINVOKE) == 0) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd, nullptr, &suppressGCTransition); } else { if (sig->getCallConv() == CORINFO_CALLCONV_DEFAULT || sig->getCallConv() == CORINFO_CALLCONV_VARARG) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(nullptr, sig, &suppressGCTransition); assert(!call->gtCallCookie); } if (suppressGCTransition) { call->gtCallMoreFlags |= GTF_CALL_M_SUPPRESS_GC_TRANSITION; } // If we can't get the unmanaged calling convention or the calling convention is unsupported in the JIT, // return here without inlining the native call. if (unmanagedCallConv == CorInfoCallConvExtension::Managed || unmanagedCallConv == CorInfoCallConvExtension::Fastcall || unmanagedCallConv == CorInfoCallConvExtension::FastcallMemberFunction) { return; } optNativeCallCount++; if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI))) { // PInvoke in CoreRT ABI must be always inlined. Non-inlineable CALLI cases have been // converted to regular method calls earlier using convertPInvokeCalliToCall. // PInvoke CALLI in IL stubs must be inlined } else { // Check legality if (!impCanPInvokeInlineCallSite(block)) { return; } // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive // inlining in CoreRT. Skip the ambient conditions checks and profitability checks. if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0) { if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers()) { // Raw PInvoke call in PInvoke IL stub generated must be inlined to avoid infinite // recursive calls to the stub. } else { if (!impCanPInvokeInline()) { return; } // Size-speed tradeoff: don't use inline pinvoke at rarely // executed call sites. The non-inline version is more // compact. if (block->isRunRarely()) { return; } } } // The expensive check should be last if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig)) { return; } } JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s\n", info.compFullName)); call->gtFlags |= GTF_CALL_UNMANAGED; call->unmgdCallConv = unmanagedCallConv; if (!call->IsSuppressGCTransition()) { info.compUnmanagedCallCountWithGCTransition++; } // AMD64 convention is same for native and managed if (unmanagedCallConv == CorInfoCallConvExtension::C || unmanagedCallConv == CorInfoCallConvExtension::CMemberFunction) { call->gtFlags |= GTF_CALL_POP_ARGS; } if (unmanagedCallConv == CorInfoCallConvExtension::Thiscall) { call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL; } } GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di) { var_types callRetTyp = JITtype2varType(sig->retType); /* The function pointer is on top of the stack - It may be a * complex expression. As it is evaluated after the args, * it may cause registered args to be spilled. Simply spill it. */ // Ignore this trivial case. 
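// A single local variable is trivial to evaluate again, so don't bother spilling it.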
if (impStackTop().val->gtOper != GT_LCL_VAR) { impSpillStackEntry(verCurrentState.esStackDepth - 1, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall")); } /* Get the function pointer */ GenTree* fptr = impPopStack().val; // The function pointer is typically a sized to match the target pointer size // However, stubgen IL optimization can change LDC.I8 to LDC.I4 // See ILCodeStream::LowerOpcode assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT); #ifdef DEBUG // This temporary must never be converted to a double in stress mode, // because that can introduce a call to the cast helper after the // arguments have already been evaluated. if (fptr->OperGet() == GT_LCL_VAR) { lvaTable[fptr->AsLclVarCommon()->GetLclNum()].lvKeepType = 1; } #endif /* Create the call node */ GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); #ifdef UNIX_X86_ABI call->gtFlags &= ~GTF_CALL_POP_ARGS; #endif return call; } /*****************************************************************************/ void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig) { assert(call->gtFlags & GTF_CALL_UNMANAGED); /* Since we push the arguments in reverse order (i.e. right -> left) * spill any side effects from the stack * * OBS: If there is only one side effect we do not need to spill it * thus we have to spill all side-effects except last one */ unsigned lastLevelWithSideEffects = UINT_MAX; unsigned argsToReverse = sig->numArgs; // For "thiscall", the first argument goes in a register. Since its // order does not need to be changed, we do not need to spill it if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { assert(argsToReverse); argsToReverse--; } #ifndef TARGET_X86 // Don't reverse args on ARM or x64 - first four args always placed in regs in order argsToReverse = 0; #endif for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++) { if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF) { assert(lastLevelWithSideEffects == UINT_MAX); impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect")); } else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) { if (lastLevelWithSideEffects != UINT_MAX) { /* We had a previous side effect - must spill it */ impSpillStackEntry(lastLevelWithSideEffects, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect")); /* Record the level for the current side effect in case we will spill it */ lastLevelWithSideEffects = level; } else { /* This is the first side effect encountered - record its level */ lastLevelWithSideEffects = level; } } } /* The argument list is now "clean" - no out-of-order side effects * Pop the argument list in reverse order */ GenTreeCall::Use* args = impPopReverseCallArgs(sig->numArgs, sig, sig->numArgs - argsToReverse); call->AsCall()->gtCallArgs = args; if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { GenTree* thisPtr = args->GetNode(); impBashVarAddrsToI(thisPtr); assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF); } for (GenTreeCall::Use& argUse : GenTreeCall::UseList(args)) { GenTree* arg = argUse.GetNode(); call->gtFlags |= arg->gtFlags & GTF_GLOB_EFFECT; // We should not be passing gc typed args to an unmanaged call. 
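// Byrefs are tolerated by retyping them to native int below; any other GC-typed arg is invalid IL.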
if (varTypeIsGC(arg->TypeGet())) { // Tolerate byrefs by retyping to native int. // // This is needed or we'll generate inconsistent GC info // for this arg at the call site (gc info says byref, // pinvoke sig says native int). // if (arg->TypeGet() == TYP_BYREF) { arg->ChangeType(TYP_I_IMPL); } else { assert(!"*** invalid IL: gc ref passed to unmanaged call"); } } } } //------------------------------------------------------------------------ // impInitClass: Build a node to initialize the class before accessing the // field if necessary // // Arguments: // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized // by a call to CEEInfo::resolveToken(). // // Return Value: If needed, a pointer to the node that will perform the class // initializtion. Otherwise, nullptr. // GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle); if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0) { return nullptr; } bool runtimeLookup; GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup); if (node == nullptr) { assert(compDonotInline()); return nullptr; } if (runtimeLookup) { node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(node)); } else { // Call the shared non gc static helper, as its the fastest node = fgGetSharedCCtor(pResolvedToken->hClass); } return node; } GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp) { GenTree* op1 = nullptr; #if defined(DEBUG) // If we're replaying under SuperPMI, we're going to read the data stored by SuperPMI and use it // for optimization. Unfortunately, SuperPMI doesn't implement a guarantee on the alignment of // this data, so for some platforms which don't allow unaligned access (e.g., Linux arm32), // this can fault. We should fix SuperPMI to guarantee alignment, but that is a big change. // Instead, simply fix up the data here for future use. // This variable should be the largest size element, with the largest alignment requirement, // and the native C++ compiler should guarantee sufficient alignment. double aligned_data = 0.0; void* p_aligned_data = &aligned_data; if (info.compMethodSuperPMIIndex != -1) { switch (lclTyp) { case TYP_BOOL: case TYP_BYTE: case TYP_UBYTE: static_assert_no_msg(sizeof(unsigned __int8) == sizeof(bool)); static_assert_no_msg(sizeof(unsigned __int8) == sizeof(signed char)); static_assert_no_msg(sizeof(unsigned __int8) == sizeof(unsigned char)); // No alignment necessary for byte. 
break; case TYP_SHORT: case TYP_USHORT: static_assert_no_msg(sizeof(unsigned __int16) == sizeof(short)); static_assert_no_msg(sizeof(unsigned __int16) == sizeof(unsigned short)); if ((size_t)fldAddr % sizeof(unsigned __int16) != 0) { *(unsigned __int16*)p_aligned_data = GET_UNALIGNED_16(fldAddr); fldAddr = p_aligned_data; } break; case TYP_INT: case TYP_UINT: case TYP_FLOAT: static_assert_no_msg(sizeof(unsigned __int32) == sizeof(int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(unsigned int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(float)); if ((size_t)fldAddr % sizeof(unsigned __int32) != 0) { *(unsigned __int32*)p_aligned_data = GET_UNALIGNED_32(fldAddr); fldAddr = p_aligned_data; } break; case TYP_LONG: case TYP_ULONG: case TYP_DOUBLE: static_assert_no_msg(sizeof(unsigned __int64) == sizeof(__int64)); static_assert_no_msg(sizeof(unsigned __int64) == sizeof(double)); if ((size_t)fldAddr % sizeof(unsigned __int64) != 0) { *(unsigned __int64*)p_aligned_data = GET_UNALIGNED_64(fldAddr); fldAddr = p_aligned_data; } break; default: assert(!"Unexpected lclTyp"); break; } } #endif // DEBUG switch (lclTyp) { int ival; __int64 lval; double dval; case TYP_BOOL: ival = *((bool*)fldAddr); goto IVAL_COMMON; case TYP_BYTE: ival = *((signed char*)fldAddr); goto IVAL_COMMON; case TYP_UBYTE: ival = *((unsigned char*)fldAddr); goto IVAL_COMMON; case TYP_SHORT: ival = *((short*)fldAddr); goto IVAL_COMMON; case TYP_USHORT: ival = *((unsigned short*)fldAddr); goto IVAL_COMMON; case TYP_UINT: case TYP_INT: ival = *((int*)fldAddr); IVAL_COMMON: op1 = gtNewIconNode(ival); break; case TYP_LONG: case TYP_ULONG: lval = *((__int64*)fldAddr); op1 = gtNewLconNode(lval); break; case TYP_FLOAT: dval = *((float*)fldAddr); op1 = gtNewDconNode(dval); op1->gtType = TYP_FLOAT; break; case TYP_DOUBLE: dval = *((double*)fldAddr); op1 = gtNewDconNode(dval); break; default: assert(!"Unexpected lclTyp"); break; } return op1; } GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp) { // Ordinary static fields never overlap. RVA statics, however, can overlap (if they're // mapped to the same ".data" declaration). That said, such mappings only appear to be // possible with ILASM, and in ILASM-produced (ILONLY) images, RVA statics are always // read-only (using "stsfld" on them is UB). In mixed-mode assemblies, RVA statics can // be mutable, but the only current producer of such images, the C++/CLI compiler, does // not appear to support mapping different fields to the same address. So we will say // that "mutable overlapping RVA statics" are UB as well. If this ever changes, code in // morph and value numbering will need to be updated to respect "gtFldMayOverlap" and // "NotAField FldSeq". // For statics that are not "boxed", the initial address tree will contain the field sequence. // For those that are, we will attach it later, when adding the indirection for the box, since // that tree will represent the true address. bool isBoxedStatic = (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) != 0; FieldSeqNode* innerFldSeq = !isBoxedStatic ? 
GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField) : FieldSeqStore::NotAField(); GenTree* op1; switch (pFieldInfo->fieldAccessor) { case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: { assert(!compIsForInlining()); // We first call a special helper to get the statics base pointer op1 = impParentClassTokenToHandle(pResolvedToken); // compIsForInlining() is false so we should not get NULL here assert(op1 != nullptr); var_types type = TYP_BYREF; switch (pFieldInfo->helper) { case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE: type = TYP_I_IMPL; break; case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE: break; default: assert(!"unknown generic statics helper"); break; } op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewCallArgs(op1)); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); } break; case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); } else #endif { op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper); } op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); break; } case CORINFO_FIELD_STATIC_READYTORUN_HELPER: { #ifdef FEATURE_READYTORUN assert(opts.IsReadyToRun()); assert(!compIsForInlining()); CORINFO_LOOKUP_KIND kind; info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind); assert(kind.needsRuntimeLookup); GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind); GenTreeCall::Use* args = gtNewCallArgs(ctxTree); GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } var_types type = TYP_BYREF; op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); #else unreached(); #endif // FEATURE_READYTORUN } break; default: { // Do we need the address of a static field? // if (access & CORINFO_ACCESS_ADDRESS) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr); // We should always be able to access this static's address directly. assert(pFldAddr == nullptr); // Create the address node. GenTreeFlags handleKind = isBoxedStatic ? GTF_ICON_STATIC_BOX_PTR : GTF_ICON_STATIC_HDL; op1 = gtNewIconHandleNode((size_t)fldAddr, handleKind, innerFldSeq); #ifdef DEBUG op1->AsIntCon()->gtTargetHandle = op1->AsIntCon()->gtIconVal; #endif if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_ICON_INITCLASS; } } else // We need the value of a static field { // In future, it may be better to just create the right tree here instead of folding it later. 
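// Import the access as a GT_FIELD for now; morph will expand it into the proper indirection.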
op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField); if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_FLD_INITCLASS; } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1->ChangeType(TYP_REF); // points at boxed object op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING; } } return op1; } break; } } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); } if (!(access & CORINFO_ACCESS_ADDRESS)) { if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF; } } return op1; } // In general try to call this before most of the verification work. Most people expect the access // exceptions before the verification exceptions. If you do this after, that usually doesn't happen. Turns // out if you can't access something we also think that you're unverifiable for other reasons. void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { if (result != CORINFO_ACCESS_ALLOWED) { impHandleAccessAllowedInternal(result, helperCall); } } void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { switch (result) { case CORINFO_ACCESS_ALLOWED: break; case CORINFO_ACCESS_ILLEGAL: // if we're verifying, then we need to reject the illegal access to ensure that we don't think the // method is verifiable. Otherwise, delay the exception to runtime. 
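// Under import-only (verification) compiles, throw now; otherwise insert a helper call that throws at runtime.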
if (compIsForImportOnly()) { info.compCompHnd->ThrowExceptionForHelper(helperCall); } else { impInsertHelperCall(helperCall); } break; } } void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo) { // Construct the argument list GenTreeCall::Use* args = nullptr; assert(helperInfo->helperNum != CORINFO_HELP_UNDEF); for (unsigned i = helperInfo->numArgs; i > 0; --i) { const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1]; GenTree* currentArg = nullptr; switch (helperArg.argType) { case CORINFO_HELPER_ARG_TYPE_Field: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun( info.compCompHnd->getFieldClass(helperArg.fieldHandle)); currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle); break; case CORINFO_HELPER_ARG_TYPE_Method: info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle); currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle); break; case CORINFO_HELPER_ARG_TYPE_Class: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle); currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle); break; case CORINFO_HELPER_ARG_TYPE_Module: currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle); break; case CORINFO_HELPER_ARG_TYPE_Const: currentArg = gtNewIconNode(helperArg.constant); break; default: NO_WAY("Illegal helper arg type"); } args = gtPrependNewCallArg(currentArg, args); } /* TODO-Review: * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee. * Also, consider sticking this in the first basic block. */ GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args); impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } //------------------------------------------------------------------------ // impTailCallRetTypeCompatible: Checks whether the return types of caller // and callee are compatible so that calle can be tail called. // sizes are not supported integral type sizes return values to temps. // // Arguments: // allowWidening -- whether to allow implicit widening by the callee. // For instance, allowing int32 -> int16 tailcalls. // The managed calling convention allows this, but // we don't want explicit tailcalls to depend on this // detail of the managed calling convention. // callerRetType -- the caller's return type // callerRetTypeClass - the caller's return struct type // callerCallConv -- calling convention of the caller // calleeRetType -- the callee's return type // calleeRetTypeClass - the callee return struct type // calleeCallConv -- calling convention of the callee // // Returns: // True if the tailcall types are compatible. // // Remarks: // Note that here we don't check compatibility in IL Verifier sense, but on the // lines of return types getting returned in the same return register. bool Compiler::impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv) { // Early out if the types are the same. if (callerRetType == calleeRetType) { return true; } // For integral types the managed calling convention dictates that callee // will widen the return value to 4 bytes, so we can allow implicit widening // in managed to managed tailcalls when dealing with <= 4 bytes. 
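// For example, a caller returning int32 may opportunistically tail call a callee declared to return int16.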
bool isManaged = (callerCallConv == CorInfoCallConvExtension::Managed) && (calleeCallConv == CorInfoCallConvExtension::Managed); if (allowWidening && isManaged && varTypeIsIntegral(callerRetType) && varTypeIsIntegral(calleeRetType) && (genTypeSize(callerRetType) <= 4) && (genTypeSize(calleeRetType) <= genTypeSize(callerRetType))) { return true; } // If the class handles are the same and not null, the return types are compatible. if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass)) { return true; } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Jit64 compat: if (callerRetType == TYP_VOID) { // This needs to be allowed to support the following IL pattern that Jit64 allows: // tail.call // pop // ret // // Note that the above IL pattern is not valid as per IL verification rules. // Therefore, only full trust code can take advantage of this pattern. return true; } // These checks return true if the return value type sizes are the same and // get returned in the same return register i.e. caller doesn't need to normalize // return value. Some of the tail calls permitted by below checks would have // been rejected by IL Verifier before we reached here. Therefore, only full // trust code can make those tail calls. unsigned callerRetTypeSize = 0; unsigned calleeRetTypeSize = 0; bool isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true, info.compIsVarArgs, callerCallConv); bool isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true, info.compIsVarArgs, calleeCallConv); if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg) { return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize); } #endif // TARGET_AMD64 || TARGET_ARM64 return false; } /******************************************************************************** * * Returns true if the current opcode and and the opcodes following it correspond * to a supported tail call IL pattern. * */ bool Compiler::impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive) { // Bail out if the current opcode is not a call. if (!impOpcodeIsCallOpcode(curOpcode)) { return false; } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // If shared ret tail opt is not enabled, we will enable // it for recursive methods. if (isRecursive) #endif { // we can actually handle if the ret is in a fallthrough block, as long as that is the only part of the // sequence. Make sure we don't go past the end of the IL however. 
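// Extend the scan window by one byte, clamped to the end of the method's IL, so the opcode in the fallthrough block can be examined.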
codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize); } // Bail out if there is no next opcode after call if (codeAddrOfNextOpcode >= codeEnd) { return false; } OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode); return (nextOpcode == CEE_RET); } /***************************************************************************** * * Determine whether the call could be converted to an implicit tail call * */ bool Compiler::impIsImplicitTailCallCandidate( OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive) { #if FEATURE_TAILCALL_OPT if (!opts.compTailCallOpt) { return false; } if (opts.OptimizationDisabled()) { return false; } // must not be tail prefixed if (prefixFlags & PREFIX_TAILCALL_EXPLICIT) { return false; } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // the block containing call is marked as BBJ_RETURN // We allow shared ret tail call optimization on recursive calls even under // !FEATURE_TAILCALL_OPT_SHARED_RETURN. if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN)) return false; #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN // must be call+ret or call+pop+ret if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive)) { return false; } return true; #else return false; #endif // FEATURE_TAILCALL_OPT } //------------------------------------------------------------------------ // impImportCall: import a call-inspiring opcode // // Arguments: // opcode - opcode that inspires the call // pResolvedToken - resolved token for the call target // pConstrainedResolvedToken - resolved constraint token (or nullptr) // newObjThis - tree for this pointer or uninitalized newobj temp (or nullptr) // prefixFlags - IL prefix flags for the call // callInfo - EE supplied info for the call // rawILOffset - IL offset of the opcode, used for guarded devirtualization. // // Returns: // Type of the call's return value. // If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF. // However we can't assert for this here yet because there are cases we miss. See issue #13272. // // // Notes: // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ. // // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated // uninitalized object. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif var_types Compiler::impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset) { assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI); // The current statement DI may not refer to the exact call, but for calls // we wish to be able to attach the exact IL instruction to get "return // value" support in the debugger, so create one with the exact IL offset. 
DebugInfo di = impCreateDIWithCurrentStackInfo(rawILOffset, true); var_types callRetTyp = TYP_COUNT; CORINFO_SIG_INFO* sig = nullptr; CORINFO_METHOD_HANDLE methHnd = nullptr; CORINFO_CLASS_HANDLE clsHnd = nullptr; unsigned clsFlags = 0; unsigned mflags = 0; GenTree* call = nullptr; GenTreeCall::Use* args = nullptr; CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM; CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr; bool exactContextNeedsRuntimeLookup = false; bool canTailCall = true; const char* szCanTailCallFailReason = nullptr; const int tailCallFlags = (prefixFlags & PREFIX_TAILCALL); const bool isReadonlyCall = (prefixFlags & PREFIX_READONLY) != 0; CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr; // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could // do that before tailcalls, but that is probably not the intended // semantic. So just disallow tailcalls from synchronized methods. // Also, popping arguments in a varargs function is more work and NYI // If we have a security object, we have to keep our frame around for callers // to see any imperative security. // Reverse P/Invokes need a call to CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT // at the end, so tailcalls should be disabled. if (info.compFlags & CORINFO_FLG_SYNCH) { canTailCall = false; szCanTailCallFailReason = "Caller is synchronized"; } else if (opts.IsReversePInvoke()) { canTailCall = false; szCanTailCallFailReason = "Caller is Reverse P/Invoke"; } #if !FEATURE_FIXED_OUT_ARGS else if (info.compIsVarArgs) { canTailCall = false; szCanTailCallFailReason = "Caller is varargs"; } #endif // FEATURE_FIXED_OUT_ARGS // We only need to cast the return value of pinvoke inlined calls that return small types // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there. // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for // the time being that the callee might be compiled by the other JIT and thus the return // value will need to be widened by us (or not widened at all...) // ReadyToRun code sticks with default calling convention that does not widen small return types. bool checkForSmallType = opts.IsReadyToRun(); bool bIntrinsicImported = false; CORINFO_SIG_INFO calliSig; GenTreeCall::Use* extraArg = nullptr; /*------------------------------------------------------------------------- * First create the call node */ if (opcode == CEE_CALLI) { if (IsTargetAbi(CORINFO_CORERT_ABI)) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block))) { eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo); return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset); } } /* Get the call site sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig); callRetTyp = JITtype2varType(calliSig.retType); call = impImportIndirectCall(&calliSig, di); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? 
info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif sig = &calliSig; } else // (opcode != CEE_CALLI) { NamedIntrinsic ni = NI_Illegal; // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to // supply the instantiation parameters necessary to make direct calls to underlying // shared generic code, rather than calling through instantiating stubs. If the // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT // must indeed pass an instantiation parameter. methHnd = callInfo->hMethod; sig = &(callInfo->sig); callRetTyp = JITtype2varType(sig->retType); mflags = callInfo->methodFlags; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif if (compIsForInlining()) { /* Does the inlinee use StackCrawlMark */ if (mflags & CORINFO_FLG_DONT_INLINE_CALLER) { compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK); return TYP_UNDEF; } /* For now ignore varargs */ if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS); return TYP_UNDEF; } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return TYP_UNDEF; } if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT)) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL); return TYP_UNDEF; } } clsHnd = pResolvedToken->hClass; clsFlags = callInfo->classFlags; #ifdef DEBUG // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute. // This recognition should really be done by knowing the methHnd of the relevant Mark method(s). // These should be in corelib.h, and available through a JIT/EE interface call. 
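// For now, recognize the method by comparing class and method names.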
const char* modName; const char* className; const char* methodName; if ((className = eeGetClassName(clsHnd)) != nullptr && strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 && (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0) { return impImportJitTestLabelMark(sig->numArgs); } #endif // DEBUG // <NICE> Factor this into getCallInfo </NICE> bool isSpecialIntrinsic = false; if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_INTRINSIC)) != 0) { const bool isTailCall = canTailCall && (tailCallFlags != 0); call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, isReadonlyCall, isTailCall, pConstrainedResolvedToken, callInfo->thisTransform, &ni, &isSpecialIntrinsic); if (compDonotInline()) { return TYP_UNDEF; } if (call != nullptr) { #ifdef FEATURE_READYTORUN if (call->OperGet() == GT_INTRINSIC) { if (opts.IsReadyToRun()) { noway_assert(callInfo->kind == CORINFO_CALL); call->AsIntrinsic()->gtEntryPoint = callInfo->codePointerLookup.constLookup; } else { call->AsIntrinsic()->gtEntryPoint.addr = nullptr; call->AsIntrinsic()->gtEntryPoint.accessType = IAT_VALUE; } } #endif bIntrinsicImported = true; goto DONE_CALL; } } #ifdef FEATURE_SIMD if (featureSIMD) { call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token); if (call != nullptr) { bIntrinsicImported = true; goto DONE_CALL; } } #endif // FEATURE_SIMD if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG) { BADCODE("Bad calling convention"); } //------------------------------------------------------------------------- // Construct the call node // // Work out what sort of call we're making. // Dispense with virtual calls implemented via LDVIRTFTN immediately. constraintCallThisTransform = callInfo->thisTransform; exactContextHnd = callInfo->contextHandle; exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup; switch (callInfo->kind) { case CORINFO_VIRTUALCALL_STUB: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); if (callInfo->stubLookup.lookupKind.needsRuntimeLookup) { if (callInfo->stubLookup.lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE); return TYP_UNDEF; } GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd); assert(!compDonotInline()); // This is the rough code to set up an indirect stub call assert(stubAddr != nullptr); // The stubAddr may be a // complex expression. As it is evaluated after the args, // it may cause registered args to be spilled. Simply spill it. 
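// Spill the stub address to a new temp and make the indirect call through the temp.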
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup")); impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE); stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr); call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT); call->gtFlags |= GTF_CALL_VIRT_STUB; #ifdef TARGET_X86 // No tailcalls allowed for these yet... canTailCall = false; szCanTailCallFailReason = "VirtualCall with runtime lookup"; #endif } else { // The stub address is known at compile time call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->AsCall()->gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr; call->gtFlags |= GTF_CALL_VIRT_STUB; assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE && callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE); if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT; } } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // Null check is sometimes needed for ready to run to handle // non-virtual <-> virtual changes between versions if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } } #endif break; } case CORINFO_VIRTUALCALL_VTABLE: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->gtFlags |= GTF_CALL_VIRT_VTABLE; // Should we expand virtual call targets early for this method? // if (opts.compExpandCallsEarly) { // Mark this method to expand the virtual call target early in fgMorpgCall call->AsCall()->SetExpandedEarly(); } break; } case CORINFO_VIRTUALCALL_LDVIRTFTN: { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN); return TYP_UNDEF; } assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); // OK, We've been told to call via LDVIRTFTN, so just // take the call now.... 
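// Pop the args and the 'this' pointer; the call target is computed via ldvirtftn from a clone of 'this'.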
GenTreeCall::Use* args = impPopCallArgs(sig->numArgs, sig); GenTree* thisPtr = impPopStack().val; thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform); assert(thisPtr != nullptr); // Clone the (possibly transformed) "this" pointer GenTree* thisPtrCopy; thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("LDVIRTFTN this pointer")); GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo); assert(fptr != nullptr); thisPtr = nullptr; // can't reuse it // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node call = gtNewIndCallNode(fptr, callRetTyp, args, di); call->AsCall()->gtCallThisArg = gtNewCallArgs(thisPtrCopy); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI)) { // CoreRT generic virtual method: need to handle potential fat function pointers addFatPointerCandidate(call->AsCall()); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // Null check is needed for ready to run to handle // non-virtual <-> virtual changes between versions call->gtFlags |= GTF_CALL_NULLCHECK; } #endif // Sine we are jumping over some code, check that its OK to skip that code assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); goto DONE; } case CORINFO_CALL: { // This is for a non-virtual, non-interface etc. call call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); // We remove the nullcheck for the GetType call intrinsic. // TODO-CQ: JIT64 does not introduce the null check for many more helper calls // and intrinsics. if (callInfo->nullInstanceCheck && !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (ni == NI_System_Object_GetType))) { call->gtFlags |= GTF_CALL_NULLCHECK; } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { call->AsCall()->setEntryPoint(callInfo->codePointerLookup.constLookup); } #endif break; } case CORINFO_CALL_CODE_POINTER: { // The EE has asked us to call by computing a code pointer and then doing an // indirect call. This is because a runtime lookup is required to get the code entry point. // These calls always follow a uniform calling convention, i.e. 
no extra hidden params assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); GenTree* fptr = impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod); if (compDonotInline()) { return TYP_UNDEF; } // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } break; } default: assert(!"unknown call kind"); break; } //------------------------------------------------------------------------- // Set more flags PREFIX_ASSUME(call != nullptr); if (mflags & CORINFO_FLG_NOGCCHECK) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK; } // Mark call if it's one of the ones we will maybe treat as an intrinsic if (isSpecialIntrinsic) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC; } } assert(sig); assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set. /* Some sanity checks */ // CALL_VIRT and NEWOBJ must have a THIS pointer assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS)); // static bit and hasThis are negations of one another assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0)); assert(call != nullptr); /*------------------------------------------------------------------------- * Check special-cases etc */ /* Special case - Check if it is a call to Delegate.Invoke(). */ if (mflags & CORINFO_FLG_DELEGATE_INVOKE) { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(mflags & CORINFO_FLG_FINAL); /* Set the delegate flag */ call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV; if (callInfo->wrapperDelegateInvoke) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_WRAPPER_DELEGATE_INV; } if (opcode == CEE_CALLVIRT) { assert(mflags & CORINFO_FLG_FINAL); /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */ assert(call->gtFlags & GTF_CALL_NULLCHECK); call->gtFlags &= ~GTF_CALL_NULLCHECK; } } CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass; actualMethodRetTypeSigClass = sig->retTypeSigClass; /* Check for varargs */ if (!compFeatureVarArg() && ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)) { BADCODE("Varargs not supported."); } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { assert(!compIsForInlining()); /* Set the right flags */ call->gtFlags |= GTF_CALL_POP_ARGS; call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VARARGS; /* Can't allow tailcall for varargs as it is caller-pop. The caller will be expecting to pop a certain number of arguments, but if we tailcall to a function with a different number of arguments, we are hosed. There are ways around this (caller remembers esp value, varargs is not caller-pop, etc), but not worth it. 
*/ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is varargs"; } #endif /* Get the total number of arguments - this is already correct * for CALLI - for methods we have to get it from the call site */ if (opcode != CEE_CALLI) { #ifdef DEBUG unsigned numArgsDef = sig->numArgs; #endif eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); // For vararg calls we must be sure to load the return type of the // method actually being called, as well as the return types of the // specified in the vararg signature. With type equivalency, these types // may not be the same. if (sig->retTypeSigClass != actualMethodRetTypeSigClass) { if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggerred from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass); } } assert(numArgsDef <= sig->numArgs); } /* We will have "cookie" as the last argument but we cannot push * it on the operand stack because we may overflow, so we append it * to the arg list next after we pop them */ } //--------------------------- Inline NDirect ------------------------------ // For inline cases we technically should look at both the current // block and the call site block (or just the latter if we've // fused the EH trees). However the block-related checks pertain to // EH and we currently won't inline a method with EH. So for // inlinees, just checking the call site block is sufficient. { // New lexical block here to avoid compilation errors because of GOTOs. BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block); } #ifdef UNIX_X86_ABI // On Unix x86 we use caller-cleaned convention. if ((call->gtFlags & GTF_CALL_UNMANAGED) == 0) call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI if (call->gtFlags & GTF_CALL_UNMANAGED) { // We set up the unmanaged call by linking the frame, disabling GC, etc // This needs to be cleaned up on return. // In addition, native calls have different normalization rules than managed code // (managed calling convention always widens return values in the callee) if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is native"; } checkForSmallType = true; impPopArgsForUnmanagedCall(call, sig); goto DONE; } else if ((opcode == CEE_CALLI) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG)) { if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig)) { // Normally this only happens with inlining. // However, a generic method (or type) being NGENd into another module // can run into this issue as well. There's not an easy fall-back for NGEN // so instead we fallback to JIT. 
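// Abort the inline attempt, or give up on prejitting this method and let it be jitted at runtime.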
if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE); } else { IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)"); } return TYP_UNDEF; } GenTree* cookie = eeGetPInvokeCookie(sig); // This cookie is required to be either a simple GT_CNS_INT or // an indirection of a GT_CNS_INT // GenTree* cookieConst = cookie; if (cookie->gtOper == GT_IND) { cookieConst = cookie->AsOp()->gtOp1; } assert(cookieConst->gtOper == GT_CNS_INT); // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that // we won't allow this tree to participate in any CSE logic // cookie->gtFlags |= GTF_DONT_CSE; cookieConst->gtFlags |= GTF_DONT_CSE; call->AsCall()->gtCallCookie = cookie; if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "PInvoke calli"; } } /*------------------------------------------------------------------------- * Create the argument list */ //------------------------------------------------------------------------- // Special case - for varargs we have an implicit last argument if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { assert(!compIsForInlining()); void *varCookie, *pVarCookie; if (!info.compCompHnd->canGetVarArgsHandle(sig)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE); return TYP_UNDEF; } varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie); assert((!varCookie) != (!pVarCookie)); GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig); assert(extraArg == nullptr); extraArg = gtNewCallArgs(cookie); } //------------------------------------------------------------------------- // Extra arg for shared generic code and array methods // // Extra argument containing instantiation information is passed in the // following circumstances: // (a) To the "Address" method on array classes; the extra parameter is // the array's type handle (a TypeDesc) // (b) To shared-code instance methods in generic structs; the extra parameter // is the struct's type handle (a vtable ptr) // (c) To shared-code per-instantiation non-generic static methods in generic // classes and structs; the extra parameter is the type handle // (d) To shared-code generic methods; the extra parameter is an // exact-instantiation MethodDesc // // We also set the exact type context associated with the call so we can // inline the call correctly later on. 
if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE) { assert(call->AsCall()->gtCallType == CT_USER_FUNC); if (clsHnd == nullptr) { NO_WAY("CALLI on parameterized type"); } assert(opcode != CEE_CALLI); GenTree* instParam; bool runtimeLookup; // Instantiated generic method if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD) { assert(exactContextHnd != METHOD_BEING_COMPILED_CONTEXT()); CORINFO_METHOD_HANDLE exactMethodHandle = (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK); if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbMethHndNode(exactMethodHandle); info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle); } } else { instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } // otherwise must be an instance method in a generic struct, // a static method in a generic type, or a runtime-generated array method else { assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); CORINFO_CLASS_HANDLE exactClassHandle = eeGetClassFromContext(exactContextHnd); if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD); return TYP_UNDEF; } if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall) { // We indicate "readonly" to the Address operation by using a null // instParam. instParam = gtNewIconNode(0, TYP_REF); } else if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbClsHndNode(exactClassHandle); info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle); } } else { instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } assert(extraArg == nullptr); extraArg = gtNewCallArgs(instParam); } if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0)) { // Only verifiable cases are supported. // dup; ldvirtftn; newobj; or ldftn; newobj. // IL test could contain unverifiable sequence, in this case optimization should not be done. 
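// Peek at what ldftn/ldvirtftn left on the stack; if it carries a method token, remember it for the delegate construction optimization.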
if (impStackHeight() > 0) { typeInfo delegateTypeInfo = impStackTop().seTypeInfo; if (delegateTypeInfo.IsToken()) { ldftnToken = delegateTypeInfo.GetToken(); } } } //------------------------------------------------------------------------- // The main group of arguments args = impPopCallArgs(sig->numArgs, sig, extraArg); call->AsCall()->gtCallArgs = args; for (GenTreeCall::Use& use : call->AsCall()->Args()) { call->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT; } //------------------------------------------------------------------------- // The "this" pointer if (((mflags & CORINFO_FLG_STATIC) == 0) && ((sig->callConv & CORINFO_CALLCONV_EXPLICITTHIS) == 0) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr))) { GenTree* obj; if (opcode == CEE_NEWOBJ) { obj = newobjThis; } else { obj = impPopStack().val; obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform); if (compDonotInline()) { return TYP_UNDEF; } } // Store the "this" value in the call call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT; call->AsCall()->gtCallThisArg = gtNewCallArgs(obj); // Is this a virtual or interface call? if (call->AsCall()->IsVirtual()) { // only true object pointers can be virtual assert(obj->gtType == TYP_REF); // See if we can devirtualize. const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isLateDevirtualization = false; impDevirtualizeCall(call->AsCall(), pResolvedToken, &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle, &exactContextHnd, isLateDevirtualization, isExplicitTailCall, // Take care to pass raw IL offset here as the 'debug info' might be different for // inlinees. rawILOffset); // Devirtualization may change which method gets invoked. Update our local cache. // methHnd = callInfo->hMethod; } if (impIsThis(obj)) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS; } } //------------------------------------------------------------------------- // The "this" pointer for "newobj" if (opcode == CEE_NEWOBJ) { if (clsFlags & CORINFO_FLG_VAROBJSIZE) { assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately // This is a 'new' of a variable sized object, wher // the constructor is to return the object. In this case // the constructor claims to return VOID but we know it // actually returns the new object assert(callRetTyp == TYP_VOID); callRetTyp = TYP_REF; call->gtType = TYP_REF; impSpillSpecialSideEff(); impPushOnStack(call, typeInfo(TI_REF, clsHnd)); } else { if (clsFlags & CORINFO_FLG_DELEGATE) { // New inliner morph it in impImportCall. // This will allow us to inline the call to the delegate constructor. call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken); } if (!bIntrinsicImported) { #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // append the call node. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Now push the value of the 'new onto the stack // This is a 'new' of a non-variable sized object. // Append the new node (op1) to the statement list, // and then push the local holding the value of this // new instruction on the stack. 
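// For value classes the temp holds the value itself; for reference types it holds the object reference.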
if (clsFlags & CORINFO_FLG_VALUECLASS) { assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR); unsigned tmp = newobjThis->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack()); } else { if (newobjThis->gtOper == GT_COMMA) { // We must have inserted the callout. Get the real newobj. newobjThis = newobjThis->AsOp()->gtOp2; } assert(newobjThis->gtOper == GT_LCL_VAR); impPushOnStack(gtNewLclvNode(newobjThis->AsLclVarCommon()->GetLclNum(), TYP_REF), typeInfo(TI_REF, clsHnd)); } } return callRetTyp; } DONE: #ifdef DEBUG // In debug we want to be able to register callsites with the EE. assert(call->AsCall()->callSig == nullptr); call->AsCall()->callSig = new (this, CMK_Generic) CORINFO_SIG_INFO; *call->AsCall()->callSig = *sig; #endif // Final importer checks for calls flagged as tail calls. // if (tailCallFlags != 0) { const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isImplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_IMPLICIT) != 0; const bool isStressTailCall = (tailCallFlags & PREFIX_TAILCALL_STRESS) != 0; // Exactly one of these should be true. assert(isExplicitTailCall != isImplicitTailCall); // This check cannot be performed for implicit tail calls for the reason // that impIsImplicitTailCallCandidate() is not checking whether return // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT. // As a result it is possible that in the following case, we find that // the type stack is non-empty if Callee() is considered for implicit // tail calling. // int Caller(..) { .... void Callee(); ret val; ... } // // Note that we cannot check return type compatibility before ImpImportCall() // as we don't have required info or need to duplicate some of the logic of // ImpImportCall(). // // For implicit tail calls, we perform this check after return types are // known to be compatible. if (isExplicitTailCall && (verCurrentState.esStackDepth != 0)) { BADCODE("Stack should be empty after tailcall"); } // For opportunistic tailcalls we allow implicit widening, i.e. tailcalls from int32 -> int16, since the // managed calling convention dictates that the callee widens the value. For explicit tailcalls we don't // want to require this detail of the calling convention to bubble up to the tailcall helpers bool allowWidening = isImplicitTailCall; if (canTailCall && !impTailCallRetTypeCompatible(allowWidening, info.compRetType, info.compMethodInfo->args.retTypeClass, info.compCallConv, callRetTyp, sig->retTypeClass, call->AsCall()->GetUnmanagedCallConv())) { canTailCall = false; szCanTailCallFailReason = "Return types are not tail call compatible"; } // Stack empty check for implicit tail calls. if (canTailCall && isImplicitTailCall && (verCurrentState.esStackDepth != 0)) { #ifdef TARGET_AMD64 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException // in JIT64, not an InvalidProgramException. Verify(false, "Stack should be empty after tailcall"); #else // TARGET_64BIT BADCODE("Stack should be empty after tailcall"); #endif //! TARGET_64BIT } // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN); // Ask VM for permission to tailcall if (canTailCall) { // True virtual or indirect calls, shouldn't pass in a callee handle. 
CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->AsCall()->gtCallType != CT_USER_FUNC) || call->AsCall()->IsVirtual()) ? nullptr : methHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, isExplicitTailCall)) { if (isExplicitTailCall) { // In case of explicit tail calls, mark it so that it is not considered // for in-lining. call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_EXPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); if (isStressTailCall) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_STRESS_TAILCALL; JITDUMP("\nGTF_CALL_M_STRESS_TAILCALL set for call [%06u]\n", dspTreeID(call)); } } else { #if FEATURE_TAILCALL_OPT // Must be an implicit tail call. assert(isImplicitTailCall); // It is possible that a call node is both an inline candidate and marked // for opportunistic tail calling. In-lining happens before morhphing of // trees. If in-lining of an in-line candidate gets aborted for whatever // reason, it will survive to the morphing stage at which point it will be // transformed into a tail call after performing additional checks. call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_IMPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); #else //! FEATURE_TAILCALL_OPT NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls"); #endif // FEATURE_TAILCALL_OPT } // This might or might not turn into a tailcall. We do more // checks in morph. For explicit tailcalls we need more // information in morph in case it turns out to be a // helper-based tailcall. if (isExplicitTailCall) { assert(call->AsCall()->tailCallInfo == nullptr); call->AsCall()->tailCallInfo = new (this, CMK_CorTailCallInfo) TailCallSiteInfo; switch (opcode) { case CEE_CALLI: call->AsCall()->tailCallInfo->SetCalli(sig); break; case CEE_CALLVIRT: call->AsCall()->tailCallInfo->SetCallvirt(sig, pResolvedToken); break; default: call->AsCall()->tailCallInfo->SetCall(sig, pResolvedToken); break; } } } else { // canTailCall reported its reasons already canTailCall = false; JITDUMP("\ninfo.compCompHnd->canTailCall returned false for call [%06u]\n", dspTreeID(call)); } } else { // If this assert fires it means that canTailCall was set to false without setting a reason! assert(szCanTailCallFailReason != nullptr); JITDUMP("\nRejecting %splicit tail call for [%06u], reason: '%s'\n", isExplicitTailCall ? "ex" : "im", dspTreeID(call), szCanTailCallFailReason); info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, isExplicitTailCall, TAILCALL_FAIL, szCanTailCallFailReason); } } // Note: we assume that small return types are already normalized by the managed callee // or by the pinvoke stub for calls to unmanaged code. if (!bIntrinsicImported) { // // Things needed to be checked when bIntrinsicImported is false. // assert(call->gtOper == GT_CALL); assert(callInfo != nullptr); if (compIsForInlining() && opcode == CEE_CALLVIRT) { GenTree* callObj = call->AsCall()->gtCallThisArg->GetNode(); if ((call->AsCall()->IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, call->AsCall()->gtCallArgs, callObj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? 
impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // Extra checks for tail calls and tail recursion. // // A tail recursive call is a potential loop from the current block to the start of the root method. // If we see a tail recursive call, mark the blocks from the call site back to the entry as potentially // being in a loop. // // Note: if we're importing an inlinee we don't mark the right set of blocks, but by then it's too // late. Currently this doesn't lead to problems. See GitHub issue 33529. // // OSR also needs to handle tail calls specially: // * block profiling in OSR methods needs to ensure probes happen before tail calls, not after. // * the root method entry must be imported if there's a recursive tail call or a potentially // inlineable tail call. // if ((tailCallFlags != 0) && canTailCall) { if (gtIsRecursiveCall(methHnd)) { assert(verCurrentState.esStackDepth == 0); BasicBlock* loopHead = nullptr; if (!compIsForInlining() && opts.IsOSR()) { // For root method OSR we may branch back to the actual method entry, // which is not fgFirstBB, and which we will need to import. assert(fgEntryBB != nullptr); loopHead = fgEntryBB; } else { // For normal jitting we may branch back to the firstBB; this // should already be imported. loopHead = fgFirstBB; } JITDUMP("\nTail recursive call [%06u] in the method. Mark " FMT_BB " to " FMT_BB " as having a backward branch.\n", dspTreeID(call), loopHead->bbNum, compCurBB->bbNum); fgMarkBackwardJump(loopHead, compCurBB); } // We only do these OSR checks in the root method because: // * If we fail to import the root method entry when importing the root method, we can't go back // and import it during inlining. So instead of checking jsut for recursive tail calls we also // have to check for anything that might introduce a recursive tail call. // * We only instrument root method blocks in OSR methods, // if (opts.IsOSR() && !compIsForInlining()) { // If a root method tail call candidate block is not a BBJ_RETURN, it should have a unique // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile // instrumentation. // if (compCurBB->bbJumpKind != BBJ_RETURN) { BasicBlock* const successor = compCurBB->GetUniqueSucc(); assert(successor->bbJumpKind == BBJ_RETURN); successor->bbFlags |= BBF_TAILCALL_SUCCESSOR; optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR; } // If this call might eventually turn into a loop back to method entry, make sure we // import the method entry. // assert(call->IsCall()); GenTreeCall* const actualCall = call->AsCall(); const bool mustImportEntryBlock = gtIsRecursiveCall(methHnd) || actualCall->IsInlineCandidate() || actualCall->IsGuardedDevirtualizationCandidate(); // Only schedule importation if we're not currently importing. // if (mustImportEntryBlock && (compCurBB != fgEntryBB)) { JITDUMP("\nOSR: inlineable or recursive tail call [%06u] in the method, so scheduling " FMT_BB " for importation\n", dspTreeID(call), fgEntryBB->bbNum); impImportBlockPending(fgEntryBB); } } } if ((sig->flags & CORINFO_SIGFLAG_FAT_CALL) != 0) { assert(opcode == CEE_CALLI || callInfo->kind == CORINFO_CALL_CODE_POINTER); addFatPointerCandidate(call->AsCall()); } DONE_CALL: // Push or append the result of the call if (callRetTyp == TYP_VOID) { if (opcode == CEE_NEWOBJ) { // we actually did push something, so don't spill the thing we just pushed. 
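            // Using a spill level of esStackDepth - 1 keeps the just-pushed object on the stack while
            // the entries beneath it can still be spilled as needed when the constructor call is appended.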
assert(verCurrentState.esStackDepth > 0); impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtDI); } else { impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } } else { impSpillSpecialSideEff(); if (clsFlags & CORINFO_FLG_ARRAY) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); } typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass); tiRetVal.NormaliseForStack(); // The CEE_READONLY prefix modifies the verification semantics of an Address // operation on an array type. if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall && tiRetVal.IsByRef()) { tiRetVal.SetIsReadonlyByRef(); } if (call->IsCall()) { // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call) GenTreeCall* origCall = call->AsCall(); const bool isFatPointerCandidate = origCall->IsFatPointerCandidate(); const bool isInlineCandidate = origCall->IsInlineCandidate(); const bool isGuardedDevirtualizationCandidate = origCall->IsGuardedDevirtualizationCandidate(); if (varTypeIsStruct(callRetTyp)) { // Need to treat all "split tree" cases here, not just inline candidates call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass); } // TODO: consider handling fatcalli cases this way too...? if (isInlineCandidate || isGuardedDevirtualizationCandidate) { // We should not have made any adjustments in impFixupCallStructReturn // as we defer those until we know the fate of the call. assert(call == origCall); assert(opts.OptEnabled(CLFLG_INLINING)); assert(!isFatPointerCandidate); // We should not try to inline calli. // Make the call its own tree (spill the stack if needed). // Do not consume the debug info here. This is particularly // important if we give up on the inline, in which case the // call will typically end up in the statement that contains // the GT_RET_EXPR that we leave on the stack. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI, false); // TODO: Still using the widened type. GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags); // Link the retExpr to the call so if necessary we can manipulate it later. origCall->gtInlineCandidateInfo->retExpr = retExpr; // Propagate retExpr as the placeholder for the call. call = retExpr; } else { // If the call is virtual, and has a generics context, and is not going to have a class probe, // record the context for possible use during late devirt. // // If we ever want to devirt at Tier0, and/or see issues where OSR methods under PGO lose // important devirtualizations, we'll want to allow both a class probe and a captured context. // if (origCall->IsVirtual() && (origCall->gtCallType != CT_INDIRECT) && (exactContextHnd != nullptr) && (origCall->gtClassProfileCandidateInfo == nullptr)) { JITDUMP("\nSaving context %p for call [%06u]\n", exactContextHnd, dspTreeID(origCall)); origCall->gtCallMoreFlags |= GTF_CALL_M_LATE_DEVIRT; LateDevirtualizationInfo* const info = new (this, CMK_Inlining) LateDevirtualizationInfo; info->exactContextHnd = exactContextHnd; origCall->gtLateDevirtualizationInfo = info; } if (isFatPointerCandidate) { // fatPointer candidates should be in statements of the form call() or var = call(). // Such form allows to find statements with fat calls without walking through whole trees // and removes problems with cutting trees. 
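                    // A "fat pointer" call is a CALLI whose target may carry an extra generic
                    // instantiation argument (CoreRT/NativeAOT ABI); a later phase expands the runtime
                    // check, which is why the call must stay at the root of its statement.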
assert(!bIntrinsicImported); assert(IsTargetAbi(CORINFO_CORERT_ABI)); if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn. { unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli")); LclVarDsc* varDsc = lvaGetDesc(calliSlot); varDsc->lvVerTypeInfo = tiRetVal; impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE); // impAssignTempGen can change src arg list and return type for call that returns struct. var_types type = genActualType(lvaTable[calliSlot].TypeGet()); call = gtNewLclvNode(calliSlot, type); } } // For non-candidates we must also spill, since we // might have locals live on the eval stack that this // call can modify. // // Suppress this for certain well-known call targets // that we know won't modify locals, eg calls that are // recognized in gtCanOptimizeTypeEquality. Otherwise // we may break key fragile pattern matches later on. bool spillStack = true; if (call->IsCall()) { GenTreeCall* callNode = call->AsCall(); if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) || gtIsTypeHandleToRuntimeTypeHandleHelper(callNode))) { spillStack = false; } else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0) { spillStack = false; } } if (spillStack) { impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call")); } } } if (!bIntrinsicImported) { //------------------------------------------------------------------------- // /* If the call is of a small type and the callee is managed, the callee will normalize the result before returning. However, we need to normalize small type values returned by unmanaged functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here if we use the shorter inlined pinvoke stub. */ if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT)) { call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp); } } impPushOnStack(call, tiRetVal); } // VSD functions get a new call target each time we getCallInfo, so clear the cache. // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out. 
// if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB)) // callInfoCache.uncacheCallInfo(); return callRetTyp; } #ifdef _PREFAST_ #pragma warning(pop) #endif bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv) { CorInfoType corType = methInfo->args.retType; if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY)) { // We have some kind of STRUCT being returned structPassingKind howToReturnStruct = SPK_Unknown; var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, callConv, &howToReturnStruct); if (howToReturnStruct == SPK_ByReference) { return true; } } return false; } #ifdef DEBUG // var_types Compiler::impImportJitTestLabelMark(int numArgs) { TestLabelAndNum tlAndN; if (numArgs == 2) { tlAndN.m_num = 0; StackEntry se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); GenTree* val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue(); } else if (numArgs == 3) { StackEntry se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); GenTree* val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_num = val->AsIntConCommon()->IconValue(); se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue(); } else { assert(false); } StackEntry expSe = impPopStack(); GenTree* node = expSe.val; // There are a small number of special cases, where we actually put the annotation on a subnode. if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100) { // A loop hoist annotation with value >= 100 means that the expression should be a static field access, // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some // offset within the the static field block whose address is returned by the helper call. // The annotation is saying that this address calculation, but not the entire access, should be hoisted. assert(node->OperGet() == GT_IND); tlAndN.m_num -= 100; GetNodeTestData()->Set(node->AsOp()->gtOp1, tlAndN); GetNodeTestData()->Remove(node); } else { GetNodeTestData()->Set(node, tlAndN); } impPushOnStack(node, expSe.seTypeInfo); return node->TypeGet(); } #endif // DEBUG //----------------------------------------------------------------------------------- // impFixupCallStructReturn: For a call node that returns a struct do one of the following: // - set the flag to indicate struct return via retbuf arg; // - adjust the return type to a SIMD type if it is returned in 1 reg; // - spill call result into a temp if it is returned into 2 registers or more and not tail call or inline candidate. 
// // Arguments: // call - GT_CALL GenTree node // retClsHnd - Class handle of return type of the call // // Return Value: // Returns new GenTree node after fixing struct return of call node // GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd) { if (!varTypeIsStruct(call)) { return call; } call->gtRetClsHnd = retClsHnd; #if FEATURE_MULTIREG_RET call->InitializeStructReturnType(this, retClsHnd, call->GetUnmanagedCallConv()); const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc(); const unsigned retRegCount = retTypeDesc->GetReturnRegCount(); #else // !FEATURE_MULTIREG_RET const unsigned retRegCount = 1; #endif // !FEATURE_MULTIREG_RET structPassingKind howToReturnStruct; var_types returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); if (howToReturnStruct == SPK_ByReference) { assert(returnType == TYP_UNKNOWN); call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG; return call; } // Recognize SIMD types as we do for LCL_VARs, // note it could be not the ABI specific type, for example, on x64 we can set 'TYP_SIMD8` // for `System.Numerics.Vector2` here but lower will change it to long as ABI dictates. var_types simdReturnType = impNormStructType(call->gtRetClsHnd); if (simdReturnType != call->TypeGet()) { assert(varTypeIsSIMD(simdReturnType)); JITDUMP("changing the type of a call [%06u] from %s to %s\n", dspTreeID(call), varTypeName(call->TypeGet()), varTypeName(simdReturnType)); call->ChangeType(simdReturnType); } if (retRegCount == 1) { return call; } #if FEATURE_MULTIREG_RET assert(varTypeIsStruct(call)); // It could be a SIMD returned in several regs. assert(returnType == TYP_STRUCT); assert((howToReturnStruct == SPK_ByValueAsHfa) || (howToReturnStruct == SPK_ByValue)); #ifdef UNIX_AMD64_ABI // must be a struct returned in two registers assert(retRegCount == 2); #else // not UNIX_AMD64_ABI assert(retRegCount >= 2); #endif // not UNIX_AMD64_ABI if (!call->CanTailCall() && !call->IsInlineCandidate()) { // Force a call returning multi-reg struct to be always of the IR form // tmp = call // // No need to assign a multi-reg struct to a local var if: // - It is a tail call or // - The call is marked for in-lining later return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv())); } return call; #endif // FEATURE_MULTIREG_RET } /***************************************************************************** For struct return values, re-type the operand in the case where the ABI does not use a struct return buffer */ //------------------------------------------------------------------------ // impFixupStructReturnType: For struct return values it sets appropriate flags in MULTIREG returns case; // in non-multiref case it handles two special helpers: `CORINFO_HELP_GETFIELDSTRUCT`, `CORINFO_HELP_UNBOX_NULLABLE`. // // Arguments: // op - the return value; // retClsHnd - the struct handle; // unmgdCallConv - the calling convention of the function that returns this struct. // // Return Value: // the result tree that does the return. 
// GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv) { assert(varTypeIsStruct(info.compRetType)); assert(info.compRetBuffArg == BAD_VAR_NUM); JITDUMP("\nimpFixupStructReturnType: retyping\n"); DISPTREE(op); #if defined(TARGET_XARCH) #if FEATURE_MULTIREG_RET // No VarArgs for CoreCLR on x64 Unix UNIX_AMD64_ABI_ONLY(assert(!info.compIsVarArgs)); // Is method returning a multi-reg struct? if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { // In case of multi-reg struct return, we force IR to be one of the following: // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp). if (op->gtOper == GT_LCL_VAR) { // Note that this is a multi-reg return. unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { return op; } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #else assert(info.compRetNativeType != TYP_STRUCT); #endif // defined(UNIX_AMD64_ABI) || defined(TARGET_X86) #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM) if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); // Make sure this struct type stays as struct so that we can return it as an HFA lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64) // Is method returning a multi-reg struct? if (IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); if (!lvaIsImplicitByRefLocal(lclNum)) { // Make sure this struct type is not struct promoted lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #endif // FEATURE_MULTIREG_RET && TARGET_ARM64 if (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this)) { // Don't retype `struct` as a primitive type in `ret` instruction. return op; } // This must be one of those 'special' helpers that don't // really have a return buffer, but instead use it as a way // to keep the trees cleaner with fewer address-taken temps. 
// // Well now we have to materialize the the return buffer as // an address-taken temp. Then we can return the temp. // // NOTE: this code assumes that since the call directly // feeds the return, then the call must be returning the // same structure/class/type. // unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer")); // No need to spill anything as we're about to return. impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE); op = gtNewLclvNode(tmpNum, info.compRetType); JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n"); DISPTREE(op); return op; } /***************************************************************************** CEE_LEAVE may be jumping out of a protected block, viz, a catch or a finally-protected try. We find the finally blocks protecting the current offset (in order) by walking over the complete exception table and finding enclosing clauses. This assumes that the table is sorted. This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS. If we are leaving a catch handler, we need to attach the CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks. After this function, the BBJ_LEAVE block has been converted to a different type. */ #if !defined(FEATURE_EH_FUNCLETS) void Compiler::impImportLeave(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nBefore import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created) unsigned blkAddr = block->bbCodeOffs; BasicBlock* leaveTarget = block->bbJumpDest; unsigned jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary BasicBlock* step = DUMMY_INIT(NULL); unsigned encFinallies = 0; // Number of enclosing finallies. GenTree* endCatches = NULL; Statement* endLFinStmt = NULL; // The statement tree to indicate the end of locally-invoked finally. unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? * If so, we need to call CORINFO_HELP_ENDCATCH. 
*/ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) BADCODE("leave out of fault/finally block"); // Create the call to CORINFO_HELP_ENDCATCH GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID); // Make a list of all the currently pending endCatches if (endCatches) endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch); else endCatches = endCatch; #ifdef DEBUG if (verbose) { printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to " "CORINFO_HELP_ENDCATCH\n", block->bbNum, XTnum); } #endif } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* This is a finally-protected try we are jumping out of */ /* If there are any pending endCatches, and we have already jumped out of a finally-protected try, then the endCatches have to be put in a block in an outer try for async exceptions to work correctly. Else, just use append to the original block */ BasicBlock* callBlock; assert(!encFinallies == !endLFinStmt); // if we have finallies, we better have an endLFin tree, and vice-versa if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY " "block %s\n", callBlock->dspToString()); } #endif } else { assert(step != DUMMY_INIT(NULL)); /* Calling the finally block */ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); assert(step->bbJumpKind == BBJ_ALWAYS); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n", callBlock->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } // note that this sets BBF_IMPORTED on the block impEndTreeList(callBlock, endLFinStmt, lastStmt); } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n", step->dspToString()); } #endif unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel; assert(finallyNesting <= compHndBBtabCount); callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. 
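            // Build the GT_END_LFIN marker recording this finally's nesting level; the statement created
            // from it is placed at the start of the block that runs after the finally returns, so the
            // non-funclet EH model can finish its bookkeeping for the locally invoked finally.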
GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting); endLFinStmt = gtNewStmt(endLFin); endCatches = NULL; encFinallies++; invalidatePreds = true; } } /* Append any remaining endCatches, if any */ assert(!encFinallies == !endLFinStmt); if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS " "block %s\n", block->dspToString()); } #endif } else { // If leaveTarget is the start of another try block, we want to make sure that // we do not insert finalStep into that try block. Hence, we find the enclosing // try block. unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget); // Insert a new BB either in the try region indicated by tryIndex or // the handler region indicated by leaveTarget->bbHndIndex, // depending on which is the inner region. BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step); finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS; step->bbJumpDest = finalStep; /* The new block will inherit this block's weight */ finalStep->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies, finalStep->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } impEndTreeList(finalStep, endLFinStmt, lastStmt); finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE // Queue up the jump target for importing impImportBlockPending(leaveTarget); invalidatePreds = true; } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #else // FEATURE_EH_FUNCLETS void Compiler::impImportLeave(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nBefore import CEE_LEAVE in " FMT_BB " (targetting " FMT_BB "):\n", block->bbNum, block->bbJumpDest->bbNum); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created) unsigned blkAddr = block->bbCodeOffs; BasicBlock* leaveTarget = block->bbJumpDest; unsigned jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary BasicBlock* step = nullptr; enum StepType { // No step type; step == NULL. ST_None, // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair? // That is, is step->bbJumpDest where a finally will return to? ST_FinallyReturn, // The step block is a catch return. ST_Catch, // The step block is in a "try", created as the target for a finally return or the target for a catch return. 
ST_Try }; StepType stepType = ST_None; unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? */ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) { BADCODE("leave out of fault/finally block"); } /* We are jumping out of a catch */ if (step == nullptr) { step = block; step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET stepType = ST_Catch; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB " to BBJ_EHCATCHRET " "block\n", XTnum, step->bbNum); } #endif } else { BasicBlock* exitBlock; /* Create a new catch exit block in the catch region for the existing step block to jump to in this * scope */ exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step); assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch // exit) returns to this block step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ exitBlock->inheritWeight(block); exitBlock->bbFlags |= BBF_IMPORTED; /* This exit block is the new step */ step = exitBlock; stepType = ST_Catch; invalidatePreds = true; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n", XTnum, exitBlock->bbNum); } #endif } } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* We are jumping out of a finally-protected try */ BasicBlock* callBlock; if (step == nullptr) { #if FEATURE_EH_CALLFINALLY_THUNKS // Put the call to the finally in the enclosing region. unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block); // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. 
block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = callBlock; block->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n", XTnum, block->bbNum, callBlock->bbNum); } #endif #else // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_CALLFINALLY block\n", XTnum, callBlock->bbNum); } #endif #endif // !FEATURE_EH_CALLFINALLY_THUNKS } else { // Calling the finally block. We already have a step block that is either the call-to-finally from a // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by // a 'finally'), or the step block is the return from a catch. // // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will // automatically re-raise the exception, using the return address of the catch (that is, the target // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64, // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly // within the 'try' region protected by the finally, since we generate code in such a way that execution // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on // stack walks.) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS if (step->bbJumpKind == BBJ_EHCATCHRET) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = step2; step->bbJumpDest->bbRefs++; step2->inheritWeight(block); step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is " "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n", XTnum, step->bbNum, step2->bbNum); } #endif step = step2; assert(stepType == ST_Catch); // Leave it as catch type for now. } #endif // FEATURE_EH_CALLFINALLY_THUNKS #if FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 
0 : HBtab->ebdEnclosingHndIndex + 1; #else // !FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = XTnum + 1; unsigned callFinallyHndIndex = 0; // don't care #endif // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY " "block " FMT_BB "\n", XTnum, callBlock->bbNum); } #endif } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); stepType = ST_FinallyReturn; /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) " "block " FMT_BB "\n", XTnum, step->bbNum); } #endif callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. invalidatePreds = true; } else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { // We are jumping out of a catch-protected try. // // If we are returning from a call to a finally, then we must have a step block within a try // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the // finally raises an exception), the VM will find this step block, notice that it is in a protected region, // and invoke the appropriate catch. // // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception), // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM, // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target // address of the catch return as the new exception address. That is, the re-raised exception appears to // occur at the catch return address. If this exception return address skips an enclosing try/catch that // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should. // For example: // // try { // try { // // something here raises ThreadAbortException // LEAVE LABEL_1; // no need to stop at LABEL_2 // } catch (Exception) { // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode. // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only // // need to do this transformation if the current EH block is a try/catch that catches // // ThreadAbortException (or one of its parents), however we might not be able to find that // // information, so currently we do it for all catch types. 
// LEAVE LABEL_1; // Convert this to LEAVE LABEL2; // } // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code // } catch (ThreadAbortException) { // } // LABEL_1: // // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# // compiler. if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch)) { BasicBlock* catchStep; assert(step); if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); } else { assert(stepType == ST_Catch); assert(step->bbJumpKind == BBJ_EHCATCHRET); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = catchStep; step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ catchStep->inheritWeight(block); catchStep->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { if (stepType == ST_FinallyReturn) { printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } else { assert(stepType == ST_Catch); printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } } #endif // DEBUG /* This block is the new step */ step = catchStep; stepType = ST_Try; invalidatePreds = true; } } } if (step == nullptr) { block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE " "block " FMT_BB " to BBJ_ALWAYS\n", block->bbNum); } #endif } else { step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) #ifdef DEBUG if (verbose) { printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum); } #endif // Queue up the jump target for importing impImportBlockPending(leaveTarget); } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #endif // FEATURE_EH_FUNCLETS /*****************************************************************************/ // This is called when reimporting a leave block. It resets the JumpKind, // JumpDest, and bbNext to the original values void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) { #if defined(FEATURE_EH_FUNCLETS) // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1) // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0, // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we // create another BBJ_ALWAYS (call it B2). 
In this process B1 gets orphaned and any blocks to which B1 is the // only predecessor are also considered orphans and attempted to be deleted. // // try { // .... // try // { // .... // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1 // } finally { } // } finally { } // OUTSIDE: // // In the above nested try-finally example, we create a step block (call it Bstep) which in branches to a block // where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block. // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To // work around this we will duplicate B0 (call it B0Dup) before reseting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. if (block->bbJumpKind == BBJ_CALLFINALLY) { BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind); dupBlock->bbFlags = block->bbFlags; dupBlock->bbJumpDest = block->bbJumpDest; dupBlock->copyEHRegion(block); dupBlock->bbCatchTyp = block->bbCatchTyp; // Mark this block as // a) not referenced by any other block to make sure that it gets deleted // b) weight zero // c) prevent from being imported // d) as internal // e) as rarely run dupBlock->bbRefs = 0; dupBlock->bbWeight = BB_ZERO_WEIGHT; dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY; // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS // will be next to each other. fgInsertBBafter(block, dupBlock); #ifdef DEBUG if (verbose) { printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum); } #endif } #endif // FEATURE_EH_FUNCLETS block->bbJumpKind = BBJ_LEAVE; fgInitBBLookup(); block->bbJumpDest = fgLookupBB(jmpAddr); // We will leave the BBJ_ALWAYS block we introduced. When it's reimported // the BBJ_ALWAYS block will be unreachable, and will be removed after. The // reason we don't want to remove the block at this point is that if we call // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be // added and the linked list length will be different than fgBBcount. } /*****************************************************************************/ // Get the first non-prefix opcode. Used for verification of valid combinations // of prefixes and actual opcodes. OPCODE Compiler::impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp) { while (codeAddr < codeEndp) { OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); if (opcode == CEE_PREFIX1) { if (codeAddr >= codeEndp) { break; } opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); codeAddr += sizeof(__int8); } switch (opcode) { case CEE_UNALIGNED: case CEE_VOLATILE: case CEE_TAILCALL: case CEE_CONSTRAINED: case CEE_READONLY: break; default: return opcode; } codeAddr += opcodeSizes[opcode]; } return CEE_ILLEGAL; } /*****************************************************************************/ // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes void Compiler::impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix) { OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!( // Opcode of all ldind and stdind happen to be in continuous, except stind.i. 
((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) || (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) || (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) || // volatile. prefix is allowed with the ldsfld and stsfld (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD))))) { BADCODE("Invalid opcode for unaligned. or volatile. prefix"); } } /*****************************************************************************/ #ifdef DEBUG #undef RETURN // undef contracts RETURN macro enum controlFlow_t { NEXT, CALL, RETURN, THROW, BRANCH, COND_BRANCH, BREAK, PHI, META, }; const static controlFlow_t controlFlow[] = { #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow, #include "opcode.def" #undef OPDEF }; #endif // DEBUG /***************************************************************************** * Determine the result type of an arithemetic operation * On 64-bit inserts upcasts when native int is mixed with int32 */ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2) { var_types type = TYP_UNDEF; GenTree* op1 = *pOp1; GenTree* op2 = *pOp2; // Arithemetic operations are generally only allowed with // primitive types, but certain operations are allowed // with byrefs if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF)) { if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF)) { // byref1-byref2 => gives a native int type = TYP_I_IMPL; } else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF)) { // [native] int - byref => gives a native int // // The reason is that it is possible, in managed C++, // to have a tree like this: // // - // / \. // / \. // / \. // / \. // const(h) int addr byref // // <BUGNUM> VSW 318822 </BUGNUM> // // So here we decide to make the resulting type to be a native int. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_I_IMPL; } else { // byref - [native] int => gives a byref assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet())); #ifdef TARGET_64BIT if ((genActualType(op2->TypeGet()) != TYP_I_IMPL)) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } } else if ((oper == GT_ADD) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF)) { // byref + [native] int => gives a byref // (or) // [native] int + byref => gives a byref // only one can be a byref : byref op byref not allowed assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet())); #ifdef TARGET_64BIT if (genActualType(op2->TypeGet()) == TYP_BYREF) { if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? 
TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } #ifdef TARGET_64BIT else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long // we get this because in the IL the long isn't Int64, it's just IntPtr if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } type = TYP_I_IMPL; } #else // 32-bit TARGET else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long type = TYP_LONG; } #endif // TARGET_64BIT else { // int + int => gives an int assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); type = genActualType(op1->gtType); // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT. // Otherwise, turn floats into doubles if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT)) { assert(genActualType(op2->gtType) == TYP_DOUBLE); type = TYP_DOUBLE; } } assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT); return type; } //------------------------------------------------------------------------ // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting // // Arguments: // op1 - value to cast // pResolvedToken - resolved token for type to cast to // isCastClass - true if this is a castclass, false if isinst // // Return Value: // tree representing optimized cast, or null if no optimization possible GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass) { assert(op1->TypeGet() == TYP_REF); // Don't optimize for minopts or debug codegen. if (opts.OptimizationDisabled()) { return nullptr; } // See what we know about the type of the object being cast. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull); if (fromClass != nullptr) { CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass; JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst", isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass), info.compCompHnd->getClassName(toClass)); // Perhaps we know if the cast will succeed or fail. TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass); if (castResult == TypeCompareState::Must) { // Cast will succeed, result is simply op1. JITDUMP("Cast will succeed, optimizing to simply return input\n"); return op1; } else if (castResult == TypeCompareState::MustNot) { // See if we can sharpen exactness by looking for final classes if (!isExact) { isExact = impIsClassExact(fromClass); } // Cast to exact type will fail. 
Handle case where we have // an exact type (that is, fromClass is not a subtype) // and we're not going to throw on failure. if (isExact && !isCastClass) { JITDUMP("Cast will fail, optimizing to return null\n"); GenTree* result = gtNewIconNode(0, TYP_REF); // If the cast was fed by a box, we can remove that too. if (op1->IsBoxedValue()) { JITDUMP("Also removing upstream box\n"); gtTryRemoveBoxUpstreamEffects(op1); } return result; } else if (isExact) { JITDUMP("Not optimizing failing castclass (yet)\n"); } else { JITDUMP("Can't optimize since fromClass is inexact\n"); } } else { JITDUMP("Result of cast unknown, must generate runtime test\n"); } } else { JITDUMP("\nCan't optimize since fromClass is unknown\n"); } return nullptr; } //------------------------------------------------------------------------ // impCastClassOrIsInstToTree: build and import castclass/isinst // // Arguments: // op1 - value to cast // op2 - type handle for type to cast to // pResolvedToken - resolved token from the cast operation // isCastClass - true if this is castclass, false means isinst // // Return Value: // Tree representing the cast // // Notes: // May expand into a series of runtime checks or a helper call. GenTree* Compiler::impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset) { assert(op1->TypeGet() == TYP_REF); // Optimistically assume the jit should expand this as an inline test bool shouldExpandInline = true; // Profitability check. // // Don't bother with inline expansion when jit is trying to // generate code quickly, or the cast is in code that won't run very // often, or the method already is pretty big. if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { // not worth the code expansion if jitting fast or in a rarely run block shouldExpandInline = false; } else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals()) { // not worth creating an untracked local variable shouldExpandInline = false; } else if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && (JitConfig.JitCastProfiling() == 1)) { // Optimizations are enabled but we're still instrumenting (including casts) if (isCastClass && !impIsClassExact(pResolvedToken->hClass)) { // Usually, we make a speculative assumption that it makes sense to expand castclass // even for non-sealed classes, but let's rely on PGO in this specific case shouldExpandInline = false; } } // Pessimistically assume the jit cannot expand this as an inline test bool canExpandInline = false; const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass); // Legality check. // // Not all classclass/isinst operations can be inline expanded. // Check legality only if an inline expansion is desirable. if (shouldExpandInline) { if (isCastClass) { // Jit can only inline expand the normal CHKCASTCLASS helper. canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS); } else { if (helper == CORINFO_HELP_ISINSTANCEOFCLASS) { // If the class is exact, the jit can expand the IsInst check inline. canExpandInline = impIsClassExact(pResolvedToken->hClass); } } } const bool expandInline = canExpandInline && shouldExpandInline; if (!expandInline) { JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst", canExpandInline ? 
"want smaller code or faster jitting" : "inline expansion not legal"); // If we CSE this class handle we prevent assertionProp from making SubType assertions // so instead we force the CSE logic to not consider CSE-ing this class handle. // op2->gtFlags |= GTF_DONT_CSE; GenTreeCall* call = gtNewHelperCallNode(helper, TYP_REF, gtNewCallArgs(op2, op1)); if (impIsCastHelperEligibleForClassProbe(call) && !impIsClassExact(pResolvedToken->hClass)) { ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return call; } JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst"); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2")); GenTree* temp; GenTree* condMT; // // expand the methodtable match: // // condMT ==> GT_NE // / \. // GT_IND op2 (typically CNS_INT) // | // op1Copy // // This can replace op1 with a GT_COMMA that evaluates op1 into a local // op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1")); // // op1 is now known to be a non-complex tree // thus we can use gtClone(op1) from now on // GenTree* op2Var = op2; if (isCastClass) { op2Var = fgInsertCommaFormTemp(&op2); lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true; } temp = gtNewMethodTableLookup(temp); condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2); GenTree* condNull; // // expand the null check: // // condNull ==> GT_EQ // / \. // op1Copy CNS_INT // null // condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF)); // // expand the true and false trees for the condMT // GenTree* condFalse = gtClone(op1); GenTree* condTrue; if (isCastClass) { // // use the special helper that skips the cases checked by our inlined cast // const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL; condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewCallArgs(op2Var, gtClone(op1))); } else { condTrue = gtNewIconNode(0, TYP_REF); } GenTree* qmarkMT; // // Generate first QMARK - COLON tree // // qmarkMT ==> GT_QMARK // / \. // condMT GT_COLON // / \. // condFalse condTrue // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse); qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp->AsColon()); if (isCastClass && impIsClassExact(pResolvedToken->hClass) && condTrue->OperIs(GT_CALL)) { // condTrue is used only for throwing InvalidCastException in case of casting to an exact class. condTrue->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; } GenTree* qmarkNull; // // Generate second QMARK - COLON tree // // qmarkNull ==> GT_QMARK // / \. // condNull GT_COLON // / \. // qmarkMT op1Copy // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT); qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp->AsColon()); qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF; // Make QMark node a top level node by spilling it. unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2")); impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE); // TODO-CQ: Is it possible op1 has a better type? // // See also gtGetHelperCallClassHandle where we make the same // determination for the helper call variants. 
LclVarDsc* lclDsc = lvaGetDesc(tmp); assert(lclDsc->lvSingleDef == 0); lclDsc->lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); lvaSetClass(tmp, pResolvedToken->hClass); return gtNewLclvNode(tmp, TYP_REF); } #ifndef DEBUG #define assertImp(cond) ((void)0) #else #define assertImp(cond) \ do \ { \ if (!(cond)) \ { \ const int cchAssertImpBuf = 600; \ char* assertImpBuf = (char*)_alloca(cchAssertImpBuf); \ _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \ "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \ impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \ op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \ assertAbort(assertImpBuf, __FILE__, __LINE__); \ } \ } while (0) #endif // DEBUG //------------------------------------------------------------------------ // impBlockIsInALoop: check if a block might be in a loop // // Arguments: // block - block to check // // Returns: // true if the block might be in a loop. // // Notes: // Conservatively correct; may return true for some blocks that are // not actually in loops. // bool Compiler::impBlockIsInALoop(BasicBlock* block) { return (compIsForInlining() && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) || ((block->bbFlags & BBF_BACKWARD_JUMP) != 0); } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif /***************************************************************************** * Import the instr for the given basic block */ void Compiler::impImportBlockCode(BasicBlock* block) { #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind) #ifdef DEBUG if (verbose) { printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName); } #endif unsigned nxtStmtIndex = impInitBlockLineInfo(); IL_OFFSET nxtStmtOffs; CorInfoHelpFunc helper; CorInfoIsAccessAllowedResult accessAllowedResult; CORINFO_HELPER_DESC calloutHelper; const BYTE* lastLoadToken = nullptr; /* Get the tree list started */ impBeginTreeList(); #ifdef FEATURE_ON_STACK_REPLACEMENT bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0); #ifdef DEBUG // Optionally suppress patchpoints by method hash // static ConfigMethodRange JitEnablePatchpointRange; JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); const bool inRange = JitEnablePatchpointRange.Contains(hash); enablePatchpoints &= inRange; #endif // DEBUG if (enablePatchpoints) { // We don't inline at Tier0, if we do, we may need rethink our approach. // Could probably support inlines that don't introduce flow. // assert(!compIsForInlining()); // OSR is not yet supported for methods with explicit tail calls. // // But we also do not have to switch these methods to be optimized as we should be // able to avoid getting trapped in Tier0 code by normal call counting. // So instead, just suppress adding patchpoints. // if (!compTailPrefixSeen) { // The normaly policy is only to add patchpoints to the targets of lexically // backwards branches. // if (compHasBackwardJump) { assert(compCanHavePatchpoints()); // Is the start of this block a suitable patchpoint? // if (((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) && (verCurrentState.esStackDepth == 0)) { // We should have noted this earlier and bailed out of OSR. 
// assert(!block->hasHndIndex()); block->bbFlags |= BBF_PATCHPOINT; setMethodHasPatchpoint(); } } else { // Should not see backward branch targets w/o backwards branches assert((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == 0); } } #ifdef DEBUG // As a stress test, we can place patchpoints at the start of any block // that is a stack empty point and is not within a handler. // // Todo: enable for mid-block stack empty points too. // const int offsetOSR = JitConfig.JitOffsetOnStackReplacement(); const int randomOSR = JitConfig.JitRandomOnStackReplacement(); const bool tryOffsetOSR = offsetOSR >= 0; const bool tryRandomOSR = randomOSR > 0; if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0)) { // Block start can have a patchpoint. See if we should add one. // bool addPatchpoint = false; // Specific offset? // if (tryOffsetOSR) { if (impCurOpcOffs == (unsigned)offsetOSR) { addPatchpoint = true; } } // Random? // else { // Reuse the random inliner's random state. // Note m_inlineStrategy is always created, even if we're not inlining. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(randomOSR); const int randomValue = (int)random->Next(100); addPatchpoint = (randomValue < randomOSR); } if (addPatchpoint) { block->bbFlags |= BBF_PATCHPOINT; setMethodHasPatchpoint(); } JITDUMP("\n** %s patchpoint%s added to " FMT_BB " (il offset %u)\n", tryOffsetOSR ? "offset" : "random", addPatchpoint ? "" : " not", block->bbNum, impCurOpcOffs); } #endif // DEBUG } // Mark stack-empty rare blocks to be considered for partial compilation. // // Ideally these are conditionally executed blocks -- if the method is going // to unconditionally throw, there's not as much to be gained by deferring jitting. // For now, we just screen out the entry bb. // // In general we might want track all the IL stack empty points so we can // propagate rareness back through flow and place the partial compilation patchpoints "earlier" // so there are fewer overall. // // Note unlike OSR, it's ok to forgo these. // // Todo: stress mode... // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) && compCanHavePatchpoints() && !compTailPrefixSeen) { // Is this block a good place for partial compilation? // if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) && ((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex()) { JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum); block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT; setMethodHasPartialCompilationPatchpoint(); // Change block to BBJ_THROW so we won't trigger importation of successors. // block->bbJumpKind = BBJ_THROW; // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. 
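            // Since this block is not imported at all, any IL in it that reads the
            // generics context would go unnoticed, so conservatively keep the context
            // alive (and reportable) whenever the method has one.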
// if (info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE)) { lvaGenericsContextInUse = true; } return; } } #endif // FEATURE_ON_STACK_REPLACEMENT /* Walk the opcodes that comprise the basic block */ const BYTE* codeAddr = info.compCode + block->bbCodeOffs; const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd; IL_OFFSET opcodeOffs = block->bbCodeOffs; IL_OFFSET lastSpillOffs = opcodeOffs; signed jmpDist; /* remember the start of the delegate creation sequence (used for verification) */ const BYTE* delegateCreateStart = nullptr; int prefixFlags = 0; bool explicitTailCall, constraintCall, readonlyCall; typeInfo tiRetVal; unsigned numArgs = info.compArgsCount; /* Now process all the opcodes in the block */ var_types callTyp = TYP_COUNT; OPCODE prevOpcode = CEE_ILLEGAL; if (block->bbCatchTyp) { if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { impCurStmtOffsSet(block->bbCodeOffs); } // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block // to a temp. This is a trade off for code simplicity impSpillSpecialSideEff(); } while (codeAddr < codeEndp) { #ifdef FEATURE_READYTORUN bool usingReadyToRunHelper = false; #endif CORINFO_RESOLVED_TOKEN resolvedToken; CORINFO_RESOLVED_TOKEN constrainedResolvedToken; CORINFO_CALL_INFO callInfo; CORINFO_FIELD_INFO fieldInfo; tiRetVal = typeInfo(); // Default type info //--------------------------------------------------------------------- /* We need to restrict the max tree depth as many of the Compiler functions are recursive. We do this by spilling the stack */ if (verCurrentState.esStackDepth) { /* Has it been a while since we last saw a non-empty stack (which guarantees that the tree depth isnt accumulating. */ if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode)) { impSpillStackEnsure(); lastSpillOffs = opcodeOffs; } } else { lastSpillOffs = opcodeOffs; impBoxTempInUse = false; // nothing on the stack, box temp OK to use again } /* Compute the current instr offset */ opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); #ifndef DEBUG if (opts.compDbgInfo) #endif { nxtStmtOffs = (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET; /* Have we reached the next stmt boundary ? */ if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs) { assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]); if (verCurrentState.esStackDepth != 0 && opts.compDbgCode) { /* We need to provide accurate IP-mapping at this point. So spill anything on the stack so that it will form gtStmts with the correct stmt offset noted */ impSpillStackEnsure(true); } // Have we reported debug info for any tree? if (impCurStmtDI.IsValid() && opts.compDbgCode) { GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); assert(!impCurStmtDI.IsValid()); } if (!impCurStmtDI.IsValid()) { /* Make sure that nxtStmtIndex is in sync with opcodeOffs. If opcodeOffs has gone past nxtStmtIndex, catch up */ while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount && info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs) { nxtStmtIndex++; } /* Go to the new stmt */ impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]); /* Update the stmt boundary index */ nxtStmtIndex++; assert(nxtStmtIndex <= info.compStmtOffsetsCount); /* Are there any more line# entries after this one? 
*/ if (nxtStmtIndex < info.compStmtOffsetsCount) { /* Remember where the next line# starts */ nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex]; } else { /* No more line# entries */ nxtStmtOffs = BAD_IL_OFFSET; } } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) && (verCurrentState.esStackDepth == 0)) { /* At stack-empty locations, we have already added the tree to the stmt list with the last offset. We just need to update impCurStmtDI */ impCurStmtOffsSet(opcodeOffs); } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) && impOpcodeIsCallSiteBoundary(prevOpcode)) { /* Make sure we have a type cached */ assert(callTyp != TYP_COUNT); if (callTyp == TYP_VOID) { impCurStmtOffsSet(opcodeOffs); } else if (opts.compDbgCode) { impSpillStackEnsure(true); impCurStmtOffsSet(opcodeOffs); } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP)) { if (opts.compDbgCode) { impSpillStackEnsure(true); } impCurStmtOffsSet(opcodeOffs); } assert(!impCurStmtDI.IsValid() || (nxtStmtOffs == BAD_IL_OFFSET) || (impCurStmtDI.GetLocation().GetOffset() <= nxtStmtOffs)); } CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL); var_types lclTyp, ovflType = TYP_UNKNOWN; GenTree* op1 = DUMMY_INIT(NULL); GenTree* op2 = DUMMY_INIT(NULL); GenTree* newObjThisPtr = DUMMY_INIT(NULL); bool uns = DUMMY_INIT(false); bool isLocal = false; /* Get the next opcode and the size of its parameters */ OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); #ifdef DEBUG impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs); #endif DECODE_OPCODE: // Return if any previous code has caused inline to fail. if (compDonotInline()) { return; } /* Get the size of additional parameters */ signed int sz = opcodeSizes[opcode]; #ifdef DEBUG clsHnd = NO_CLASS_HANDLE; lclTyp = TYP_COUNT; callTyp = TYP_COUNT; impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); impCurOpcName = opcodeNames[opcode]; if (verbose && (opcode != CEE_PREFIX1)) { printf("%s", impCurOpcName); } /* Use assertImp() to display the opcode */ op1 = op2 = nullptr; #endif /* See what kind of an opcode we have, then */ unsigned mflags = 0; unsigned clsFlags = 0; switch (opcode) { unsigned lclNum; var_types type; GenTree* op3; genTreeOps oper; unsigned size; int val; CORINFO_SIG_INFO sig; IL_OFFSET jmpAddr; bool ovfl, unordered, callNode; bool ldstruct; CORINFO_CLASS_HANDLE tokenType; union { int intVal; float fltVal; __int64 lngVal; double dblVal; } cval; case CEE_PREFIX1: opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; SPILL_APPEND: // We need to call impSpillLclRefs() for a struct type lclVar. // This is because there may be loads of that lclVar on the evaluation stack, and // we need to ensure that those loads are completed before we modify it. 
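                // For example (illustrative only): with a struct local "v", a sequence that
                // pushes a read of v and then assigns to v would otherwise let the pending
                // read observe the new value instead of the old one.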
if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtGetOp1())) { GenTree* lhs = op1->gtGetOp1(); GenTreeLclVarCommon* lclVar = nullptr; if (lhs->gtOper == GT_LCL_VAR) { lclVar = lhs->AsLclVarCommon(); } else if (lhs->OperIsBlk()) { // Check if LHS address is within some struct local, to catch // cases where we're updating the struct by something other than a stfld GenTree* addr = lhs->AsBlk()->Addr(); // Catches ADDR(LCL_VAR), or ADD(ADDR(LCL_VAR),CNS_INT)) lclVar = addr->IsLocalAddrExpr(); // Catches ADDR(FIELD(... ADDR(LCL_VAR))) if (lclVar == nullptr) { GenTree* lclTree = nullptr; if (impIsAddressInLocal(addr, &lclTree)) { lclVar = lclTree->AsLclVarCommon(); } } } if (lclVar != nullptr) { impSpillLclRefs(lclVar->GetLclNum()); } } /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); goto DONE_APPEND; APPEND: /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); goto DONE_APPEND; DONE_APPEND: #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif break; case CEE_LDNULL: impPushNullObjRefOnStack(); break; case CEE_LDC_I4_M1: case CEE_LDC_I4_0: case CEE_LDC_I4_1: case CEE_LDC_I4_2: case CEE_LDC_I4_3: case CEE_LDC_I4_4: case CEE_LDC_I4_5: case CEE_LDC_I4_6: case CEE_LDC_I4_7: case CEE_LDC_I4_8: cval.intVal = (opcode - CEE_LDC_I4_0); assert(-1 <= cval.intVal && cval.intVal <= 8); goto PUSH_I4CON; case CEE_LDC_I4_S: cval.intVal = getI1LittleEndian(codeAddr); goto PUSH_I4CON; case CEE_LDC_I4: cval.intVal = getI4LittleEndian(codeAddr); goto PUSH_I4CON; PUSH_I4CON: JITDUMP(" %d", cval.intVal); impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT)); break; case CEE_LDC_I8: cval.lngVal = getI8LittleEndian(codeAddr); JITDUMP(" 0x%016llx", cval.lngVal); impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG)); break; case CEE_LDC_R8: cval.dblVal = getR8LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE)); break; case CEE_LDC_R4: cval.dblVal = getR4LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal, TYP_FLOAT), typeInfo(TI_DOUBLE)); break; case CEE_LDSTR: val = getU4LittleEndian(codeAddr); JITDUMP(" %08X", val); impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal); break; case CEE_LDARG: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: lclNum = (opcode - CEE_LDARG_0); assert(lclNum >= 0 && lclNum < 4); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_0: case CEE_LDLOC_1: case CEE_LDLOC_2: case CEE_LDLOC_3: lclNum = (opcode - CEE_LDLOC_0); assert(lclNum >= 0 && lclNum < 4); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_STARG: lclNum = getU2LittleEndian(codeAddr); goto STARG; case CEE_STARG_S: lclNum = getU1LittleEndian(codeAddr); STARG: JITDUMP(" %u", lclNum); if (compIsForInlining()) { op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); 
noway_assert(op1->gtOper == GT_LCL_VAR); lclNum = op1->AsLclVar()->GetLclNum(); goto VAR_ST_VALID; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } // We should have seen this arg write in the prescan assert(lvaTable[lclNum].lvHasILStoreOp); goto VAR_ST; case CEE_STLOC: lclNum = getU2LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_S: lclNum = getU1LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: isLocal = true; lclNum = (opcode - CEE_STLOC_0); assert(lclNum >= 0 && lclNum < 4); LOC_ST: if (compIsForInlining()) { lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp")); goto _PopValue; } lclNum += numArgs; VAR_ST: if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var) { BADCODE("Bad IL"); } VAR_ST_VALID: /* if it is a struct assignment, make certain we don't overflow the buffer */ assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd)); if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } _PopValue: /* Pop the value being assigned */ { StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; tiRetVal = se.seTypeInfo; } #ifdef FEATURE_SIMD if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet())) { assert(op1->TypeGet() == TYP_STRUCT); op1->gtType = lclTyp; } #endif // FEATURE_SIMD op1 = impImplicitIorI4Cast(op1, lclTyp); #ifdef TARGET_64BIT // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT)) { op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT); } #endif // TARGET_64BIT // We had better assign it a value of the correct type assertImp( genActualType(lclTyp) == genActualType(op1->gtType) || (genActualType(lclTyp) == TYP_I_IMPL && op1->IsLocalAddrExpr() != nullptr) || (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) || (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) || (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) || ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF)); /* If op1 is "&var" then its type is the transient "*" and it can be used either as TYP_BYREF or TYP_I_IMPL */ if (op1->IsLocalAddrExpr() != nullptr) { assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF); /* When "&var" is created, we assume it is a byref. If it is being assigned to a TYP_I_IMPL var, change the type to prevent unnecessary GC info */ if (genActualType(lclTyp) == TYP_I_IMPL) { op1->gtType = TYP_I_IMPL; } } // If this is a local and the local is a ref type, see // if we can improve type information based on the // value being assigned. if (isLocal && (lclTyp == TYP_REF)) { // We should have seen a stloc in our IL prescan. assert(lvaTable[lclNum].lvHasILStoreOp); // Is there just one place this local is defined? const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef; // Conservative check that there is just one // definition that reaches this store. 
const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0); if (isSingleDefLocal && hasSingleReachingDef) { lvaUpdateClass(lclNum, op1, clsHnd); } } /* Filter out simple assignments to itself */ if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum()) { if (opts.compDbgCode) { op1 = gtNewNothingNode(); goto SPILL_APPEND; } else { break; } } /* Create the assignment node */ op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1)); /* If the local is aliased or pinned, we need to spill calls and indirections from the stack. */ if ((lvaTable[lclNum].IsAddressExposed() || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) && (verCurrentState.esStackDepth > 0)) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned")); } /* Spill any refs to the local from the stack */ impSpillLclRefs(lclNum); // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op2' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet()); } if (varTypeIsStruct(lclTyp)) { op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL); } else { op1 = gtNewAssignNode(op2, op1); } goto SPILL_APPEND; case CEE_LDLOCA: lclNum = getU2LittleEndian(codeAddr); goto LDLOCA; case CEE_LDLOCA_S: lclNum = getU1LittleEndian(codeAddr); LDLOCA: JITDUMP(" %u", lclNum); if (compIsForInlining()) { // Get the local type lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp")); assert(!lvaGetDesc(lclNum)->lvNormalizeOnLoad()); op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum)); goto _PUSH_ADRVAR; } lclNum += numArgs; assertImp(lclNum < info.compLocalsCount); goto ADRVAR; case CEE_LDARGA: lclNum = getU2LittleEndian(codeAddr); goto LDARGA; case CEE_LDARGA_S: lclNum = getU1LittleEndian(codeAddr); LDARGA: JITDUMP(" %u", lclNum); Verify(lclNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument, // followed by a ldfld to load the field. op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); if (op1->gtOper != GT_LCL_VAR) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR); return; } assert(op1->gtOper == GT_LCL_VAR); goto _PUSH_ADRVAR; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } goto ADRVAR; ADRVAR: op1 = impCreateLocalNode(lclNum DEBUGARG(opcodeOffs + sz + 1)); _PUSH_ADRVAR: assert(op1->gtOper == GT_LCL_VAR); /* Note that this is supposed to create the transient type "*" which may be used as a TYP_I_IMPL. However we catch places where it is used as a TYP_I_IMPL and change the node if needed. Thus we are pessimistic and may report byrefs in the GC info where it was not absolutely needed, but it is safer this way. 
*/ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); // &aliasedVar doesnt need GTF_GLOB_REF, though alisasedVar does assert((op1->gtFlags & GTF_GLOB_REF) == 0); tiRetVal = lvaTable[lclNum].lvVerTypeInfo; impPushOnStack(op1, tiRetVal); break; case CEE_ARGLIST: if (!info.compIsVarArgs) { BADCODE("arglist in non-vararg method"); } assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG); /* The ARGLIST cookie is a hidden 'last' parameter, we have already adjusted the arg count cos this is like fetching the last param */ assertImp(0 < numArgs); lclNum = lvaVarargsHandleArg; op1 = gtNewLclvNode(lclNum, TYP_I_IMPL DEBUGARG(opcodeOffs + sz + 1)); op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); impPushOnStack(op1, tiRetVal); break; case CEE_ENDFINALLY: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY); return; } if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } if (info.compXcptnsCount == 0) { BADCODE("endfinally outside finally"); } assert(verCurrentState.esStackDepth == 0); op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr); goto APPEND; case CEE_ENDFILTER: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER); return; } block->bbSetRunRarely(); // filters are rare if (info.compXcptnsCount == 0) { BADCODE("endfilter outside filter"); } op1 = impPopStack().val; assertImp(op1->gtType == TYP_INT); if (!bbInFilterILRange(block)) { BADCODE("EndFilter outside a filter handler"); } /* Mark current bb as end of filter */ assert(compCurBB->bbFlags & BBF_DONT_REMOVE); assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET); /* Mark catch handler as successor */ op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1); if (verCurrentState.esStackDepth != 0) { verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__) DEBUGARG(__LINE__)); } goto APPEND; case CEE_RET: prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it RET: if (!impReturnInstruction(prefixFlags, opcode)) { return; // abort } else { break; } case CEE_JMP: assert(!compIsForInlining()); if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex()) { /* CEE_JMP does not make sense in some "protected" regions. */ BADCODE("Jmp not allowed in protected region"); } if (opts.IsReversePInvoke()) { BADCODE("Jmp not allowed in reverse P/Invoke"); } if (verCurrentState.esStackDepth != 0) { BADCODE("Stack must be empty after CEE_JMPs"); } _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); /* The signature of the target has to be identical to ours. 
At least check that argCnt and returnType match */ eeGetMethodSig(resolvedToken.hMethod, &sig); if (sig.numArgs != info.compMethodInfo->args.numArgs || sig.retType != info.compMethodInfo->args.retType || sig.callConv != info.compMethodInfo->args.callConv) { BADCODE("Incompatible target for CEE_JMPs"); } op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod); /* Mark the basic block as being a JUMP instead of RETURN */ block->bbFlags |= BBF_HAS_JMP; /* Set this flag to make sure register arguments have a location assigned * even if we don't use them inside the method */ compJmpOpUsed = true; fgNoStructPromotion = true; goto APPEND; case CEE_LDELEMA: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a value class array we just do a simple address-of if (eeIsValueClass(ldelemClsHnd)) { CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd); if (cit == CORINFO_TYPE_UNDEF) { lclTyp = TYP_STRUCT; } else { lclTyp = JITtype2varType(cit); } goto ARR_LD_POST_VERIFY; } // Similarly, if its a readonly access, we can do a simple address-of // without doing a runtime type-check if (prefixFlags & PREFIX_READONLY) { lclTyp = TYP_REF; goto ARR_LD_POST_VERIFY; } // Otherwise we need the full helper function with run-time type check op1 = impTokenToHandle(&resolvedToken); if (op1 == nullptr) { // compDonotInline() return; } { GenTreeCall::Use* args = gtNewCallArgs(op1); // Type args = gtPrependNewCallArg(impPopStack().val, args); // index args = gtPrependNewCallArg(impPopStack().val, args); // array op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args); } impPushOnStack(op1, tiRetVal); break; // ldelem for reference and value types case CEE_LDELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a reference type or generic variable type // then just generate code as though it's a ldelem.ref instruction if (!eeIsValueClass(ldelemClsHnd)) { lclTyp = TYP_REF; opcode = CEE_LDELEM_REF; } else { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd); lclTyp = JITtype2varType(jitTyp); tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct tiRetVal.NormaliseForStack(); } goto ARR_LD_POST_VERIFY; case CEE_LDELEM_I1: lclTyp = TYP_BYTE; goto ARR_LD; case CEE_LDELEM_I2: lclTyp = TYP_SHORT; goto ARR_LD; case CEE_LDELEM_I: lclTyp = TYP_I_IMPL; goto ARR_LD; // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter // and treating it as TYP_INT avoids other asserts. 
case CEE_LDELEM_U4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I8: lclTyp = TYP_LONG; goto ARR_LD; case CEE_LDELEM_REF: lclTyp = TYP_REF; goto ARR_LD; case CEE_LDELEM_R4: lclTyp = TYP_FLOAT; goto ARR_LD; case CEE_LDELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_LD; case CEE_LDELEM_U1: lclTyp = TYP_UBYTE; goto ARR_LD; case CEE_LDELEM_U2: lclTyp = TYP_USHORT; goto ARR_LD; ARR_LD: ARR_LD_POST_VERIFY: /* Pull the index value and array address */ op2 = impPopStack().val; op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); /* Check for null pointer - in the inliner case we simply abort */ if (compIsForInlining()) { if (op1->gtOper == GT_CNS_INT) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM); return; } } /* Mark the block as containing an index expression */ if (op1->gtOper == GT_LCL_VAR) { if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node and push it on the stack */ op1 = gtNewIndexRef(lclTyp, op1, op2); ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT); if ((opcode == CEE_LDELEMA) || ldstruct || (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd))) { assert(ldelemClsHnd != DUMMY_INIT(NULL)); // remember the element size if (lclTyp == TYP_REF) { op1->AsIndex()->gtIndElemSize = TARGET_POINTER_SIZE; } else { // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type. if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF) { op1->AsIndex()->gtStructElemClass = ldelemClsHnd; } assert(lclTyp != TYP_STRUCT || op1->AsIndex()->gtStructElemClass != nullptr); if (lclTyp == TYP_STRUCT) { size = info.compCompHnd->getClassSize(ldelemClsHnd); op1->AsIndex()->gtIndElemSize = size; op1->gtType = lclTyp; } } if ((opcode == CEE_LDELEMA) || ldstruct) { // wrap it in a & lclTyp = TYP_BYREF; op1 = gtNewOperNode(GT_ADDR, lclTyp, op1); } else { assert(lclTyp != TYP_STRUCT); } } if (ldstruct) { // Create an OBJ for the result op1 = gtNewObjNode(ldelemClsHnd, op1); op1->gtFlags |= GTF_EXCEPT; } impPushOnStack(op1, tiRetVal); break; // stelem for reference and value types case CEE_STELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); stelemClsHnd = resolvedToken.hClass; // If it's a reference type just behave as though it's a stelem.ref instruction if (!eeIsValueClass(stelemClsHnd)) { goto STELEM_REF_POST_VERIFY; } // Otherwise extract the type { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd); lclTyp = JITtype2varType(jitTyp); goto ARR_ST_POST_VERIFY; } case CEE_STELEM_REF: STELEM_REF_POST_VERIFY: if (opts.OptimizationEnabled()) { GenTree* array = impStackTop(2).val; GenTree* value = impStackTop().val; // Is this a case where we can skip the covariant store check? 
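                    // Array covariance means a store like "objArr[i] = o" where objArr is really
                    // a string[] must throw ArrayTypeMismatchException; CORINFO_HELP_ARRADDR_ST
                    // performs that check. It can typically be skipped when the value is null or
                    // is provably compatible with the array's exact element type.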
if (impCanSkipCovariantStoreCheck(value, array)) { lclTyp = TYP_REF; goto ARR_ST_POST_VERIFY; } } // Else call a helper function to do the assignment op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopCallArgs(3, nullptr)); goto SPILL_APPEND; case CEE_STELEM_I1: lclTyp = TYP_BYTE; goto ARR_ST; case CEE_STELEM_I2: lclTyp = TYP_SHORT; goto ARR_ST; case CEE_STELEM_I: lclTyp = TYP_I_IMPL; goto ARR_ST; case CEE_STELEM_I4: lclTyp = TYP_INT; goto ARR_ST; case CEE_STELEM_I8: lclTyp = TYP_LONG; goto ARR_ST; case CEE_STELEM_R4: lclTyp = TYP_FLOAT; goto ARR_ST; case CEE_STELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_ST; ARR_ST: ARR_ST_POST_VERIFY: /* The strict order of evaluation is LHS-operands, RHS-operands, range-check, and then assignment. However, codegen currently does the range-check before evaluation the RHS-operands. So to maintain strict ordering, we spill the stack. */ if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Strict ordering of exceptions for Array store")); } /* Pull the new value from the stack */ op2 = impPopStack().val; /* Pull the index value */ op1 = impPopStack().val; /* Pull the array address */ op3 = impPopStack().val; assertImp(op3->gtType == TYP_REF); if (op2->IsLocalAddrExpr() != nullptr) { op2->gtType = TYP_I_IMPL; } // Mark the block as containing an index expression if (op3->gtOper == GT_LCL_VAR) { if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node */ op1 = gtNewIndexRef(lclTyp, op3, op1); /* Create the assignment node and append it */ if (lclTyp == TYP_STRUCT) { assert(stelemClsHnd != DUMMY_INIT(NULL)); op1->AsIndex()->gtStructElemClass = stelemClsHnd; op1->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd); } if (varTypeIsStruct(op1)) { op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL); } else { op2 = impImplicitR4orR8Cast(op2, op1->TypeGet()); op1 = gtNewAssignNode(op1, op2); } /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; goto SPILL_APPEND; case CEE_ADD: oper = GT_ADD; goto MATH_OP2; case CEE_ADD_OVF: uns = false; goto ADD_OVF; case CEE_ADD_OVF_UN: uns = true; goto ADD_OVF; ADD_OVF: ovfl = true; callNode = false; oper = GT_ADD; goto MATH_OP2_FLAGS; case CEE_SUB: oper = GT_SUB; goto MATH_OP2; case CEE_SUB_OVF: uns = false; goto SUB_OVF; case CEE_SUB_OVF_UN: uns = true; goto SUB_OVF; SUB_OVF: ovfl = true; callNode = false; oper = GT_SUB; goto MATH_OP2_FLAGS; case CEE_MUL: oper = GT_MUL; goto MATH_MAYBE_CALL_NO_OVF; case CEE_MUL_OVF: uns = false; goto MUL_OVF; case CEE_MUL_OVF_UN: uns = true; goto MUL_OVF; MUL_OVF: ovfl = true; oper = GT_MUL; goto MATH_MAYBE_CALL_OVF; // Other binary math operations case CEE_DIV: oper = GT_DIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_DIV_UN: oper = GT_UDIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM: oper = GT_MOD; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM_UN: oper = GT_UMOD; goto MATH_MAYBE_CALL_NO_OVF; MATH_MAYBE_CALL_NO_OVF: ovfl = false; MATH_MAYBE_CALL_OVF: // Morpher has some complex logic about when to turn different // typed nodes on different platforms into helper calls. We // need to either duplicate that logic here, or just // pessimistically make all the nodes large enough to become // call nodes. Since call nodes aren't that much larger and // these opcodes are infrequent enough I chose the latter. 
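                // e.g. long division/remainder on 32-bit targets and overflow-checked long
                // multiplication get morphed into helper calls, so allocate call-sized nodes.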
callNode = true; goto MATH_OP2_FLAGS; case CEE_AND: oper = GT_AND; goto MATH_OP2; case CEE_OR: oper = GT_OR; goto MATH_OP2; case CEE_XOR: oper = GT_XOR; goto MATH_OP2; MATH_OP2: // For default values of 'ovfl' and 'callNode' ovfl = false; callNode = false; MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set /* Pull two values and push back the result */ op2 = impPopStack().val; op1 = impPopStack().val; /* Can't do arithmetic with references */ assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF); // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if its a true byref, only // if it is in the stack) impBashVarAddrsToI(op1, op2); type = impGetByRefResultType(oper, uns, &op1, &op2); assert(!ovfl || !varTypeIsFloating(op1->gtType)); /* Special case: "int+0", "int-0", "int*1", "int/1" */ if (op2->gtOper == GT_CNS_INT) { if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) || (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV))) { impPushOnStack(op1, tiRetVal); break; } } // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand // if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { if (op1->TypeGet() != type) { // We insert a cast of op1 to 'type' op1 = gtNewCastNode(type, op1, false, type); } if (op2->TypeGet() != type) { // We insert a cast of op2 to 'type' op2 = gtNewCastNode(type, op2, false, type); } } if (callNode) { /* These operators can later be transformed into 'GT_CALL' */ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]); #ifndef TARGET_ARM assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]); #endif // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying // that we'll need to transform into a general large node, but rather specifically // to a call: by doing it this way, things keep working if there are multiple sizes, // and a CALL is no longer the largest. // That said, as of now it *is* a large node, so we'll do this with an assert rather // than an "if". 
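                // If GT_CALL ever stops being a large node, the assert below will fire and the
                // allocation here would need to request the call size explicitly.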
assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE); op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true)); } else { op1 = gtNewOperNode(oper, type, op1, op2); } /* Special case: integer/long division may throw an exception */ if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this)) { op1->gtFlags |= GTF_EXCEPT; } if (ovfl) { assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL); if (ovflType != TYP_UNKNOWN) { op1->gtType = ovflType; } op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } } impPushOnStack(op1, tiRetVal); break; case CEE_SHL: oper = GT_LSH; goto CEE_SH_OP2; case CEE_SHR: oper = GT_RSH; goto CEE_SH_OP2; case CEE_SHR_UN: oper = GT_RSZ; goto CEE_SH_OP2; CEE_SH_OP2: op2 = impPopStack().val; op1 = impPopStack().val; // operand to be shifted impBashVarAddrsToI(op1, op2); type = genActualType(op1->TypeGet()); op1 = gtNewOperNode(oper, type, op1, op2); impPushOnStack(op1, tiRetVal); break; case CEE_NOT: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); type = genActualType(op1->TypeGet()); impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal); break; case CEE_CKFINITE: op1 = impPopStack().val; type = op1->TypeGet(); op1 = gtNewOperNode(GT_CKFINITE, type, op1); op1->gtFlags |= GTF_EXCEPT; impPushOnStack(op1, tiRetVal); break; case CEE_LEAVE: val = getI4LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val); goto LEAVE; case CEE_LEAVE_S: val = getI1LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val); LEAVE: if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE); return; } JITDUMP(" %04X", jmpAddr); if (block->bbJumpKind != BBJ_LEAVE) { impResetLeaveBlock(block, jmpAddr); } assert(jmpAddr == block->bbJumpDest->bbCodeOffs); impImportLeave(block); impNoteBranchOffs(); break; case CEE_BR: case CEE_BR_S: jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0) { break; /* NOP */ } impNoteBranchOffs(); break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: /* Pop the comparand (now there's a neat term) from the stack */ op1 = impPopStack().val; type = op1->TypeGet(); // Per Ecma-355, brfalse and brtrue are only specified for nint, ref, and byref. // // We've historically been a bit more permissive, so here we allow // any type that gtNewZeroConNode can handle. if (!varTypeIsArithmetic(type) && !varTypeIsGC(type)) { BADCODE("invalid type for brtrue/brfalse"); } if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { op1 = gtUnusedValNode(op1); goto SPILL_APPEND; } else { break; } } if (op1->OperIsCompare()) { if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S) { // Flip the sense of the compare op1 = gtReverseCond(op1); } } else { // We'll compare against an equally-sized integer 0 // For small types, we always compare against int op2 = gtNewZeroConNode(genActualType(op1->gtType)); // Create the comparison operator and try to fold it oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? 
GT_NE : GT_EQ; op1 = gtNewOperNode(oper, TYP_INT, op1, op2); } // fall through COND_JUMP: /* Fold comparison if we can */ op1 = gtFoldExpr(op1); /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/ /* Don't make any blocks unreachable in import only mode */ if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly()) { /* gtFoldExpr() should prevent this as we don't want to make any blocks unreachable under compDbgCode */ assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); assertImp((block->bbJumpKind == BBJ_COND) // normal case || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the // block for the second time block->bbJumpKind = foldedJumpKind; #ifdef DEBUG if (verbose) { if (op1->AsIntCon()->gtIconVal) { printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n", block->bbJumpDest->bbNum); } else { printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum); } } #endif break; } op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1); /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt' in impImportBlock(block). For correct line numbers, spill stack. */ if (opts.compDbgCode && impCurStmtDI.IsValid()) { impSpillStackEnsure(true); } goto SPILL_APPEND; case CEE_CEQ: oper = GT_EQ; uns = false; goto CMP_2_OPs; case CEE_CGT_UN: oper = GT_GT; uns = true; goto CMP_2_OPs; case CEE_CGT: oper = GT_GT; uns = false; goto CMP_2_OPs; case CEE_CLT_UN: oper = GT_LT; uns = true; goto CMP_2_OPs; case CEE_CLT: oper = GT_LT; uns = false; goto CMP_2_OPs; CMP_2_OPs: op2 = impPopStack().val; op1 = impPopStack().val; // Recognize the IL idiom of CGT_UN(op1, 0) and normalize // it so that downstream optimizations don't have to. if ((opcode == CEE_CGT_UN) && op2->IsIntegralConst(0)) { oper = GT_NE; uns = false; } #ifdef TARGET_64BIT // TODO-Casts: create a helper that upcasts int32 -> native int when necessary. // See also identical code in impGetByRefResultType and STSFLD import. if (varTypeIsI(op1) && (genActualType(op2) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, TYP_I_IMPL); } else if (varTypeIsI(op2) && (genActualType(op1) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1) == genActualType(op2) || (varTypeIsI(op1) && varTypeIsI(op2)) || (varTypeIsFloating(op1) && varTypeIsFloating(op2))); // Create the comparison node. op1 = gtNewOperNode(oper, TYP_INT, op1, op2); // TODO: setting both flags when only one is appropriate. if (uns) { op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED; } // Fold result, if possible. 
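                // e.g. a compare of two constants folds down to a 0/1 CNS_INT here, so the tree
                // pushed back on the stack need not be a relop at all.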
op1 = gtFoldExpr(op1); impPushOnStack(op1, tiRetVal); break; case CEE_BEQ_S: case CEE_BEQ: oper = GT_EQ; goto CMP_2_OPs_AND_BR; case CEE_BGE_S: case CEE_BGE: oper = GT_GE; goto CMP_2_OPs_AND_BR; case CEE_BGE_UN_S: case CEE_BGE_UN: oper = GT_GE; goto CMP_2_OPs_AND_BR_UN; case CEE_BGT_S: case CEE_BGT: oper = GT_GT; goto CMP_2_OPs_AND_BR; case CEE_BGT_UN_S: case CEE_BGT_UN: oper = GT_GT; goto CMP_2_OPs_AND_BR_UN; case CEE_BLE_S: case CEE_BLE: oper = GT_LE; goto CMP_2_OPs_AND_BR; case CEE_BLE_UN_S: case CEE_BLE_UN: oper = GT_LE; goto CMP_2_OPs_AND_BR_UN; case CEE_BLT_S: case CEE_BLT: oper = GT_LT; goto CMP_2_OPs_AND_BR; case CEE_BLT_UN_S: case CEE_BLT_UN: oper = GT_LT; goto CMP_2_OPs_AND_BR_UN; case CEE_BNE_UN_S: case CEE_BNE_UN: oper = GT_NE; goto CMP_2_OPs_AND_BR_UN; CMP_2_OPs_AND_BR_UN: uns = true; unordered = true; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR: uns = false; unordered = false; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR_ALL: /* Pull two values */ op2 = impPopStack().val; op1 = impPopStack().val; #ifdef TARGET_64BIT if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet())) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op1 side effect")); impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } if (op2->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op2 side effect")); impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } #ifdef DEBUG if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT) { impNoteLastILoffs(); } #endif break; } // We can generate an compare of different sized floating point op1 and op2 // We insert a cast // if (varTypeIsFloating(op1->TypeGet())) { if (op1->TypeGet() != op2->TypeGet()) { assert(varTypeIsFloating(op2->TypeGet())); // say op1=double, op2=float. To avoid loss of precision // while comparing, op2 is converted to double and double // comparison is done. 
if (op1->TypeGet() == TYP_DOUBLE) { // We insert a cast of op2 to TYP_DOUBLE op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } else if (op2->TypeGet() == TYP_DOUBLE) { // We insert a cast of op1 to TYP_DOUBLE op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } } } /* Create and append the operator */ op1 = gtNewOperNode(oper, TYP_INT, op1, op2); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } if (unordered) { op1->gtFlags |= GTF_RELOP_NAN_UN; } goto COND_JUMP; case CEE_SWITCH: /* Pop the switch value off the stack */ op1 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op1->TypeGet())); /* We can create a switch node */ op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1); val = (int)getU4LittleEndian(codeAddr); codeAddr += 4 + val * 4; // skip over the switch-table goto SPILL_APPEND; /************************** Casting OPCODES ***************************/ case CEE_CONV_OVF_I1: lclTyp = TYP_BYTE; goto CONV_OVF; case CEE_CONV_OVF_I2: lclTyp = TYP_SHORT; goto CONV_OVF; case CEE_CONV_OVF_I: lclTyp = TYP_I_IMPL; goto CONV_OVF; case CEE_CONV_OVF_I4: lclTyp = TYP_INT; goto CONV_OVF; case CEE_CONV_OVF_I8: lclTyp = TYP_LONG; goto CONV_OVF; case CEE_CONV_OVF_U1: lclTyp = TYP_UBYTE; goto CONV_OVF; case CEE_CONV_OVF_U2: lclTyp = TYP_USHORT; goto CONV_OVF; case CEE_CONV_OVF_U: lclTyp = TYP_U_IMPL; goto CONV_OVF; case CEE_CONV_OVF_U4: lclTyp = TYP_UINT; goto CONV_OVF; case CEE_CONV_OVF_U8: lclTyp = TYP_ULONG; goto CONV_OVF; case CEE_CONV_OVF_I1_UN: lclTyp = TYP_BYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_I2_UN: lclTyp = TYP_SHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_I_UN: lclTyp = TYP_I_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_I4_UN: lclTyp = TYP_INT; goto CONV_OVF_UN; case CEE_CONV_OVF_I8_UN: lclTyp = TYP_LONG; goto CONV_OVF_UN; case CEE_CONV_OVF_U1_UN: lclTyp = TYP_UBYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_U2_UN: lclTyp = TYP_USHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_U_UN: lclTyp = TYP_U_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_U4_UN: lclTyp = TYP_UINT; goto CONV_OVF_UN; case CEE_CONV_OVF_U8_UN: lclTyp = TYP_ULONG; goto CONV_OVF_UN; CONV_OVF_UN: uns = true; goto CONV_OVF_COMMON; CONV_OVF: uns = false; goto CONV_OVF_COMMON; CONV_OVF_COMMON: ovfl = true; goto _CONV; case CEE_CONV_I1: lclTyp = TYP_BYTE; goto CONV; case CEE_CONV_I2: lclTyp = TYP_SHORT; goto CONV; case CEE_CONV_I: lclTyp = TYP_I_IMPL; goto CONV; case CEE_CONV_I4: lclTyp = TYP_INT; goto CONV; case CEE_CONV_I8: lclTyp = TYP_LONG; goto CONV; case CEE_CONV_U1: lclTyp = TYP_UBYTE; goto CONV; case CEE_CONV_U2: lclTyp = TYP_USHORT; goto CONV; #if (REGSIZE_BYTES == 8) case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV_UN; #else case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV; #endif case CEE_CONV_U4: lclTyp = TYP_UINT; goto CONV; case CEE_CONV_U8: lclTyp = TYP_ULONG; goto CONV_UN; case CEE_CONV_R4: lclTyp = TYP_FLOAT; goto CONV; case CEE_CONV_R8: lclTyp = TYP_DOUBLE; goto CONV; case CEE_CONV_R_UN: lclTyp = TYP_DOUBLE; goto CONV_UN; CONV_UN: uns = true; ovfl = false; goto _CONV; CONV: uns = false; ovfl = false; goto _CONV; _CONV: // only converts from FLOAT or DOUBLE to an integer type // and converts from ULONG (or LONG on ARM) to DOUBLE are morphed to calls if (varTypeIsFloating(lclTyp)) { callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl #ifdef TARGET_64BIT // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? // TYP_BYREF could be used as TYP_I_IMPL which is long. 
// TODO-CQ: remove this when we lower casts long/ulong --> float/double // and generate SSE2 code instead of going through helper calls. || (impStackTop().val->TypeGet() == TYP_BYREF) #endif ; } else { callNode = varTypeIsFloating(impStackTop().val->TypeGet()); } op1 = impPopStack().val; impBashVarAddrsToI(op1); // Casts from floating point types must not have GTF_UNSIGNED set. if (varTypeIsFloating(op1)) { uns = false; } // At this point uns, ovf, callNode are all set. if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND) { op2 = op1->AsOp()->gtOp2; if (op2->gtOper == GT_CNS_INT) { ssize_t ival = op2->AsIntCon()->gtIconVal; ssize_t mask, umask; switch (lclTyp) { case TYP_BYTE: case TYP_UBYTE: mask = 0x00FF; umask = 0x007F; break; case TYP_USHORT: case TYP_SHORT: mask = 0xFFFF; umask = 0x7FFF; break; default: assert(!"unexpected type"); return; } if (((ival & umask) == ival) || ((ival & mask) == ival && uns)) { /* Toss the cast, it's a waste of time */ impPushOnStack(op1, tiRetVal); break; } else if (ival == mask) { /* Toss the masking, it's a waste of time, since we sign-extend from the small value anyways */ op1 = op1->AsOp()->gtOp1; } } } /* The 'op2' sub-operand of a cast is the 'real' type number, since the result of a cast to one of the 'small' integer types is an integer. */ type = genActualType(lclTyp); // If this is a no-op cast, just use op1. if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp))) { // Nothing needs to change } // Work is evidently required, add cast node else { if (callNode) { op1 = gtNewCastNodeL(type, op1, uns, lclTyp); } else { op1 = gtNewCastNode(type, op1, uns, lclTyp); } if (ovfl) { op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT); } if (op1->gtGetOp1()->OperIsConst() && opts.OptimizationEnabled()) { // Try and fold the introduced cast op1 = gtFoldExprConst(op1); } } impPushOnStack(op1, tiRetVal); break; case CEE_NEG: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal); break; case CEE_POP: { /* Pull the top value from the stack */ StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; /* Get hold of the type of the value being duplicated */ lclTyp = genActualType(op1->gtType); /* Does the value have any side effects? */ if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode) { // Since we are throwing away the value, just normalize // it to its address. This is more efficient. if (varTypeIsStruct(op1)) { JITDUMP("\n ... CEE_POP struct ...\n"); DISPTREE(op1); #ifdef UNIX_AMD64_ABI // Non-calls, such as obj or ret_expr, have to go through this. // Calls with large struct return value have to go through this. // Helper calls with small struct return value also have to go // through this since they do not follow Unix calling convention. if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd, op1->AsCall()->GetUnmanagedCallConv()) || op1->AsCall()->gtCallType == CT_HELPER) #endif // UNIX_AMD64_ABI { // If the value being produced comes from loading // via an underlying address, just null check the address. if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ)) { gtChangeOperToNullCheck(op1, block); } else { op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false); } JITDUMP("\n ... optimized to ...\n"); DISPTREE(op1); } } // If op1 is non-overflow cast, throw it away since it is useless. 
// Another reason for throwing away the useless cast is in the context of // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)). // The cast gets added as part of importing GT_CALL, which gets in the way // of fgMorphCall() on the forms of tail call nodes that we assert. if ((op1->gtOper == GT_CAST) && !op1->gtOverflow()) { op1 = op1->AsOp()->gtOp1; } if (op1->gtOper != GT_CALL) { if ((op1->gtFlags & GTF_SIDE_EFFECT) != 0) { op1 = gtUnusedValNode(op1); } else { // Can't bash to NOP here because op1 can be referenced from `currentBlock->bbEntryState`, // if we ever need to reimport we need a valid LCL_VAR on it. op1 = gtNewNothingNode(); } } /* Append the value to the tree list */ goto SPILL_APPEND; } /* No side effects - just throw the <BEEP> thing away */ } break; case CEE_DUP: { StackEntry se = impPopStack(); GenTree* tree = se.val; tiRetVal = se.seTypeInfo; op1 = tree; // If the expression to dup is simple, just clone it. // Otherwise spill it to a temp, and reload the temp twice. bool cloneExpr = false; if (!opts.compDbgCode) { // Duplicate 0 and +0.0 if (op1->IsIntegralConst(0) || op1->IsFloatPositiveZero()) { cloneExpr = true; } // Duplicate locals and addresses of them else if (op1->IsLocal()) { cloneExpr = true; } else if (op1->TypeIs(TYP_BYREF) && op1->OperIs(GT_ADDR) && op1->gtGetOp1()->IsLocal() && (OPCODE)impGetNonPrefixOpcode(codeAddr + sz, codeEndp) != CEE_INITOBJ) { cloneExpr = true; } } else { // Always clone for debug mode cloneExpr = true; } if (!cloneExpr) { const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill")); impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types type = genActualType(lvaTable[tmpNum].TypeGet()); op1 = gtNewLclvNode(tmpNum, type); // Propagate type info to the temp from the stack and the original tree if (type == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", tmpNum); lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle()); } } op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("DUP instruction")); assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT)); impPushOnStack(op1, tiRetVal); impPushOnStack(op2, tiRetVal); } break; case CEE_STIND_I1: lclTyp = TYP_BYTE; goto STIND; case CEE_STIND_I2: lclTyp = TYP_SHORT; goto STIND; case CEE_STIND_I4: lclTyp = TYP_INT; goto STIND; case CEE_STIND_I8: lclTyp = TYP_LONG; goto STIND; case CEE_STIND_I: lclTyp = TYP_I_IMPL; goto STIND; case CEE_STIND_REF: lclTyp = TYP_REF; goto STIND; case CEE_STIND_R4: lclTyp = TYP_FLOAT; goto STIND; case CEE_STIND_R8: lclTyp = TYP_DOUBLE; goto STIND; STIND: compUnsafeCastUsed = true; // Have to go conservative STIND_POST_VERIFY: op2 = impPopStack().val; // value to store op1 = impPopStack().val; // address to store to // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); impBashVarAddrsToI(op1, op2); op2 = impImplicitR4orR8Cast(op2, lclTyp); #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 
from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // TARGET_64BIT if (opcode == CEE_STIND_REF) { // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType)); lclTyp = genActualType(op2->TypeGet()); } // Check target type. #ifdef DEBUG if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF) { if (op2->gtType == TYP_BYREF) { assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL); } else if (lclTyp == TYP_BYREF) { assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType)); } } else { assertImp(genActualType(op2->gtType) == genActualType(lclTyp) || ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp))); } #endif op1 = gtNewOperNode(GT_IND, lclTyp, op1); // stind could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE; if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } op1 = gtNewAssignNode(op1, op2); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; // Spill side-effects AND global-data-accesses if (verCurrentState.esStackDepth > 0) { impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND")); } goto APPEND; case CEE_LDIND_I1: lclTyp = TYP_BYTE; goto LDIND; case CEE_LDIND_I2: lclTyp = TYP_SHORT; goto LDIND; case CEE_LDIND_U4: case CEE_LDIND_I4: lclTyp = TYP_INT; goto LDIND; case CEE_LDIND_I8: lclTyp = TYP_LONG; goto LDIND; case CEE_LDIND_REF: lclTyp = TYP_REF; goto LDIND; case CEE_LDIND_I: lclTyp = TYP_I_IMPL; goto LDIND; case CEE_LDIND_R4: lclTyp = TYP_FLOAT; goto LDIND; case CEE_LDIND_R8: lclTyp = TYP_DOUBLE; goto LDIND; case CEE_LDIND_U1: lclTyp = TYP_UBYTE; goto LDIND; case CEE_LDIND_U2: lclTyp = TYP_USHORT; goto LDIND; LDIND: compUnsafeCastUsed = true; // Have to go conservative LDIND_POST_VERIFY: op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); #ifdef TARGET_64BIT // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (genActualType(op1->gtType) == TYP_INT) { op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL); } #endif assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, lclTyp, op1); // ldind could point anywhere, example a boxed class static int op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; case CEE_UNALIGNED: assert(sz == 1); val = getU1LittleEndian(codeAddr); ++codeAddr; JITDUMP(" %u", val); if ((val != 1) && (val != 2) && (val != 4)) { BADCODE("Alignment unaligned. must be 1, 2, or 4"); } Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. 
prefixes"); prefixFlags |= PREFIX_UNALIGNED; impValidateMemoryAccessOpcode(codeAddr, codeEndp, false); PREFIX: opcode = (OPCODE)getU1LittleEndian(codeAddr); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; case CEE_VOLATILE: Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes"); prefixFlags |= PREFIX_VOLATILE; impValidateMemoryAccessOpcode(codeAddr, codeEndp, true); assert(sz == 0); goto PREFIX; case CEE_LDFTN: { // Need to do a lookup here so that we perform an access check // and do a NOWAY if protections are violated _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); DO_LDFTN: op1 = impMethodPointer(&resolvedToken, &callInfo); if (compDonotInline()) { return; } // Call info may have more precise information about the function than // the resolved token. CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(callInfo.hMethod != nullptr); heapToken->hMethod = callInfo.hMethod; impPushOnStack(op1, typeInfo(heapToken)); break; } case CEE_LDVIRTFTN: { /* Get the method token */ _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */, combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), CORINFO_CALLINFO_CALLVIRT), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } mflags = callInfo.methodFlags; impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); if (compIsForInlining()) { if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL); return; } } CORINFO_SIG_INFO& ftnSig = callInfo.sig; /* Get the object-ref */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); if (opts.IsReadyToRun()) { if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } } else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo); if (compDonotInline()) { return; } CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(heapToken->tokenType == CORINFO_TOKENKIND_Method); assert(callInfo.hMethod != nullptr); heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn; heapToken->hMethod = callInfo.hMethod; impPushOnStack(fptr, typeInfo(heapToken)); break; } case CEE_CONSTRAINED: assertImp(sz == sizeof(unsigned)); impResolveToken(codeAddr, &constrainedResolvedToken, 
CORINFO_TOKENKIND_Constrained); codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually JITDUMP(" (%08X) ", constrainedResolvedToken.token); Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes"); prefixFlags |= PREFIX_CONSTRAINED; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN) { BADCODE("constrained. has to be followed by callvirt, call or ldftn"); } } goto PREFIX; case CEE_READONLY: JITDUMP(" readonly."); Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes"); prefixFlags |= PREFIX_READONLY; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("readonly. has to be followed by ldelema or call"); } } assert(sz == 0); goto PREFIX; case CEE_TAILCALL: JITDUMP(" tail."); Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("tailcall. has to be followed by call, callvirt or calli"); } } assert(sz == 0); goto PREFIX; case CEE_NEWOBJ: /* Since we will implicitly insert newObjThisPtr at the start of the argument list, spill any GTF_ORDER_SIDEEFF */ impSpillSpecialSideEff(); /* NEWOBJ does not respond to TAIL */ prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT; /* NEWOBJ does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; _impResolveToken(CORINFO_TOKENKIND_NewObj); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM), &callInfo); mflags = callInfo.methodFlags; if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0) { BADCODE("newobj on static or abstract method"); } // Insert the security callout before any actual code is generated impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); // There are three different cases for new // Object size is variable (depends on arguments) // 1) Object is an array (arrays treated specially by the EE) // 2) Object is some other variable sized object (e.g. String) // 3) Class Size can be determined beforehand (normal case) // In the first case, we need to call a NEWOBJ helper (multinewarray) // in the second case we call the constructor with a '0' this pointer // In the third case we alloc the memory, then call the constuctor clsFlags = callInfo.classFlags; if (clsFlags & CORINFO_FLG_ARRAY) { // Arrays need to call the NEWOBJ helper. assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE); impImportNewObjArray(&resolvedToken, &callInfo); if (compDonotInline()) { return; } callTyp = TYP_REF; break; } // At present this can only be String else if (clsFlags & CORINFO_FLG_VAROBJSIZE) { // Skip this thisPtr argument newObjThisPtr = nullptr; /* Remember that this basic block contains 'new' of an object */ block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; } else { // This is the normal case where the size of the object is // fixed. Allocate the memory and call the constructor. 
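                // Roughly, for a reference class the import below produces IR of the shape
                //
                //     tmpN = ALLOCOBJ(clsHnd)      // "temp = allocObj" pattern required by ObjectAllocator
                //     CALL ctor(tmpN, args...)     // 'this' is the freshly allocated object
                //
                // and tmpN is what ends up on the stack, while for a value class tmpN is the
                // value itself (zero-inited if needed) and the constructor gets its address.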
// Note: We cannot add a peep to avoid use of temp here // becase we don't have enough interference info to detect when // sources and destination interfere, example: s = new S(ref); // TODO: We find the correct place to introduce a general // reverse copy prop for struct return values from newobj or // any function returning structs. /* get a temporary for the new object */ lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp")); if (compDonotInline()) { // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS. assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS); return; } // In the value class case we only need clsHnd for size calcs. // // The lookup of the code pointer will be handled by CALL in this case if (clsFlags & CORINFO_FLG_VALUECLASS) { if (compIsForInlining()) { // If value class has GC fields, inform the inliner. It may choose to // bail out on the inline. DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; // some policies do not track the relative hotness of call sites for // "always" inline cases. if (impInlineInfo->iciBlock->isRunRarely()) { compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } } } } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { lvaTable[lclNum].lvType = JITtype2varType(jitTyp); } else { // The local variable itself is the allocated space. // Here we need unsafe value cls check, since the address of struct is taken for further use // and potentially exploitable. lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */); } bool bbInALoop = impBlockIsInALoop(block); bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) && (!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN)); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { // Append a tree to zero-out the temp newObjThisPtr = gtNewLclvNode(lclNum, lclDsc->TypeGet()); newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } else { JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", lclNum); lclDsc->lvSuppressedZeroInit = 1; compSuppressedZeroInit = true; } // Obtain the address of the temp newObjThisPtr = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet())); } else { // If we're newing up a finalizable object, spill anything that can cause exceptions. // bool hasSideEffects = false; CorInfoHelpFunc newHelper = info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd, &hasSideEffects); if (hasSideEffects) { JITDUMP("\nSpilling stack for finalizable newobj\n"); impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill")); } const bool useParent = true; op1 = gtNewAllocObjNode(&resolvedToken, useParent); if (op1 == nullptr) { return; } // Remember that this basic block contains 'new' of an object block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Append the assignment to the temp/local. 
Dont need to spill // at all as we are just calling an EE-Jit helper which can only // cause an (async) OutOfMemoryException. // We assign the newly allocated object (by a GT_ALLOCOBJ node) // to a temp. Note that the pattern "temp = allocObj" is required // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes // without exhaustive walk over all expressions. impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE); assert(lvaTable[lclNum].lvSingleDef == 0); lvaTable[lclNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", lclNum); lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */); newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF); } } goto CALL; case CEE_CALLI: /* CALLI does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; FALLTHROUGH; case CEE_CALLVIRT: case CEE_CALL: // We can't call getCallInfo on the token from a CALLI, but we need it in // many other places. We unfortunately embed that knowledge here. if (opcode != CEE_CALLI) { _impResolveToken(CORINFO_TOKENKIND_Method); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, // this is how impImportCall invokes getCallInfo combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT : CORINFO_CALLINFO_NONE), &callInfo); } else { // Suppress uninitialized use warning. memset(&resolvedToken, 0, sizeof(resolvedToken)); memset(&callInfo, 0, sizeof(callInfo)); resolvedToken.token = getU4LittleEndian(codeAddr); resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; } CALL: // memberRef should be set. // newObjThisPtr should be set for CEE_NEWOBJ JITDUMP(" %08X", resolvedToken.token); constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0; bool newBBcreatedForTailcallStress; bool passedStressModeValidation; newBBcreatedForTailcallStress = false; passedStressModeValidation = true; if (compIsForInlining()) { if (compDonotInline()) { return; } // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks. assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0); } else { if (compTailCallStress()) { // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()? // Tail call stress only recognizes call+ret patterns and forces them to be // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress // doesn't import 'ret' opcode following the call into the basic block containing // the call instead imports it to a new basic block. Note that fgMakeBasicBlocks() // is already checking that there is an opcode following call and hence it is // safe here to read next opcode without bounds check. newBBcreatedForTailcallStress = impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't // make it jump to RET. (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT); if (newBBcreatedForTailcallStress && !hasTailPrefix) { // Do a more detailed evaluation of legality const bool returnFalseIfInvalid = true; const bool passedConstraintCheck = verCheckTailCallConstraint(opcode, &resolvedToken, constraintCall ? 
&constrainedResolvedToken : nullptr, returnFalseIfInvalid); if (passedConstraintCheck) { // Now check with the runtime CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod; bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) || (callInfo.kind == CORINFO_VIRTUALCALL_VTABLE); CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd, hasTailPrefix)) // Is it legal to do tailcall? { // Stress the tailcall. JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; prefixFlags |= PREFIX_TAILCALL_STRESS; } else { // Runtime disallows this tail call JITDUMP(" (Tailcall stress: runtime preventing tailcall)"); passedStressModeValidation = false; } } else { // Constraints disallow this tail call JITDUMP(" (Tailcall stress: constraint check failed)"); passedStressModeValidation = false; } } } } // This is split up to avoid goto flow warnings. bool isRecursive; isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd); // If we've already disqualified this call as a tail call under tail call stress, // don't consider it for implicit tail calling either. // // When not running under tail call stress, we may mark this call as an implicit // tail call candidate. We'll do an "equivalent" validation during impImportCall. // // Note that when running under tail call stress, a call marked as explicit // tail prefixed will not be considered for implicit tail calling. if (passedStressModeValidation && impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive)) { if (compIsForInlining()) { #if FEATURE_TAILCALL_OPT_SHARED_RETURN // Are we inlining at an implicit tail call site? If so the we can flag // implicit tail call sites in the inline body. These call sites // often end up in non BBJ_RETURN blocks, so only flag them when // we're able to handle shared returns. if (impInlineInfo->iciCall->IsImplicitTailCall()) { JITDUMP("\n (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN } else { JITDUMP("\n (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } } // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call). explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0; readonlyCall = (prefixFlags & PREFIX_READONLY) != 0; if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ) { // All calls and delegates need a security callout. // For delegates, this is the call to the delegate constructor, not the access check on the // LD(virt)FTN. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); } callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr, newObjThisPtr, prefixFlags, &callInfo, opcodeOffs); if (compDonotInline()) { // We do not check fails after lvaGrabTemp. It is covered with CoreCLR_13272 issue. assert((callTyp == TYP_UNDEF) || (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS)); return; } if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we // have created a new BB after the "call" // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless. 
{ assert(!compIsForInlining()); goto RET; } break; case CEE_LDFLD: case CEE_LDSFLD: case CEE_LDFLDA: case CEE_LDSFLDA: { bool isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA); bool isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA); /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; CORINFO_CLASS_HANDLE objType = nullptr; // used for fields if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA) { tiObj = &impStackTop().seTypeInfo; StackEntry se = impPopStack(); objType = se.seTypeInfo.GetClassHandle(); obj = se.val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; clsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER); return; default: break; } if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT && clsHnd) { if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) && !(info.compFlags & CORINFO_FLG_FORCEINLINE)) { // Loading a static valuetype field usually will cause a JitHelper to be called // for the static base. This will bloat the code. compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS); if (compInlineResult->IsFailure()) { return; } } } } tiRetVal = verMakeTypeInfo(ciType, clsHnd); if (isLoadAddress) { tiRetVal.MakeByRef(); } else { tiRetVal.NormaliseForStack(); } // Perform this check always to ensure that we get field access exceptions even with // SkipVerification. impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static load accesses non-static field if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj. if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } bool usesHelper = false; switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { // If the object is a struct, what we really want is // for the field to operate on the address of the struct. 
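                        // For example, IL along the lines of
                        //
                        //     ldloc.0              // a struct-typed local pushed by value
                        //     ldfld int32 SomeStruct::i
                        //
                        // arrives here with 'obj' being the struct value itself, so the access
                        // is retyped below to go through the struct's address instead.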
if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj)) { assert(opcode == CEE_LDFLD && objType != nullptr); obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true); } /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If the object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } // wrap it in a address of operator if necessary if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1); } else { if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1); } break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, nullptr); usesHelper = true; break; case CORINFO_FIELD_STATIC_ADDRESS: // Replace static read-only fields with constant if possible if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) && !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) && (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp))) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd, impTokenLookupContextHandle); if (initClassResult & CORINFO_INITCLASS_INITIALIZED) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr); // We should always be able to access this static's address directly // assert(pFldAddr == nullptr); op1 = impImportStaticReadOnlyField(fldAddr, lclTyp); // Widen small types since we're propagating the value // instead of producing an indir. 
// op1->gtType = genActualType(lclTyp); goto FIELD_DONE; } } FALLTHROUGH; case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; case CORINFO_FIELD_INTRINSIC_ZERO: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); op1 = gtNewIconNode(0, lclTyp); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_EMPTY_STRING: { assert(aflags & CORINFO_ACCESS_GET); // Import String.Empty as "" (GT_CNS_STR with a fake SconCPX = 0) op1 = gtNewSconNode(EMPTY_STRING_SCON, nullptr); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); #if BIGENDIAN op1 = gtNewIconNode(0, lclTyp); #else op1 = gtNewIconNode(1, lclTyp); #endif goto FIELD_DONE; } break; default: assert(!"Unexpected fieldAccessor"); } if (!isLoadAddress) { if (prefixFlags & PREFIX_VOLATILE) { op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_VOLATILE; } } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_UNALIGNED; } } } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } FIELD_DONE: impPushOnStack(op1, tiRetVal); } break; case CEE_STFLD: case CEE_STSFLD: { bool isStoreStatic = (opcode == CEE_STSFLD); CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type) /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = CORINFO_ACCESS_SET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; typeInfo tiVal; /* Pull the value from the stack */ StackEntry se = impPopStack(); op2 = se.val; tiVal = se.seTypeInfo; clsHnd = tiVal.GetClassHandle(); if (opcode == CEE_STFLD) { tiObj = &impStackTop().seTypeInfo; obj = impPopStack().val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; fieldClsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { /* Is this a 'special' (COM) field? or a TLS ref static field?, field stored int GC heap? or * per-inst static? 
*/ switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER); return; default: break; } } impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static store accesses non-static field if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using stfld on a static field. // We allow it, but need to eval any side-effects for obj if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, op2); goto SPILL_APPEND; case CORINFO_FIELD_STATIC_ADDRESS: case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; default: assert(!"Unexpected fieldAccessor"); } // Create the member assignment, unless we have a TYP_STRUCT. 
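                    // For non-struct fields the code below builds a simple store, roughly
                    // ASG(FIELD(obj, field), value); struct-typed stores are deferred to
                    // impAssignStruct until after the stack has been spilled.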
bool deferStructAssign = (lclTyp == TYP_STRUCT); if (!deferStructAssign) { if (prefixFlags & PREFIX_VOLATILE) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_IND_UNALIGNED; } /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during importation and reads from the union as if it were a long during code generation. Though this can potentially read garbage, one can get lucky to have this working correctly. This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a dependency on it. To be backward compatible, we will explicitly add an upward cast here so that it works correctly always. Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT for V4.0. */ CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be // generated for ARM as well as x86, so the following IR will be accepted: // STMTx (IL 0x... ???) // * ASG long // +--* CLS_VAR long // \--* CNS_INT int 2 if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) && varTypeIsLong(op1->TypeGet())) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } #endif #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op1' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } op1 = gtNewAssignNode(op1, op2); /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } /* stfld can interfere with value classes (consider the sequence ldloc, ldloca, ..., stfld, stloc). We will be conservative and spill all value class references from the stack. */ if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL))) { assert(tiObj); // If we can resolve the field to be within some local, // then just spill that local. 
// GenTreeLclVarCommon* const lcl = obj->IsLocalAddrExpr(); if (lcl != nullptr) { impSpillLclRefs(lcl->GetLclNum()); } else if (impIsValueType(tiObj)) { impSpillEvalStack(); } else { impSpillValueClasses(); } } /* Spill any refs to the same member from the stack */ impSpillLclRefs((ssize_t)resolvedToken.hField); /* stsfld also interferes with indirect accesses (for aliased statics) and calls. But don't need to spill other statics as we have explicitly spilled this particular static field. */ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD")); if (deferStructAssign) { op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL); } } goto APPEND; case CEE_NEWARR: { /* Get the class type index operand */ _impResolveToken(CORINFO_TOKENKIND_Newarr); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } tiRetVal = verMakeTypeInfo(resolvedToken.hClass); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); /* Form the arglist: array class handle, size */ op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); #ifdef TARGET_64BIT // The array helper takes a native int for array length. // So if we have an int, explicitly extend it to be a native int. if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { if (op2->IsIntegralConst()) { op2->gtType = TYP_I_IMPL; } else { bool isUnsigned = false; op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL); } } #endif // TARGET_64BIT #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF, gtNewCallArgs(op2)); usingReadyToRunHelper = (op1 != nullptr); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the newarr call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub // 3) Allocate the new array // Reason: performance (today, we'll always use the slow helper for the R2R generics case) // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { GenTreeCall::Use* args = gtNewCallArgs(op1, op2); /* Create a call to 'new' */ // Note that this only works for shared generic code because the same helper is used for all // reference array types op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args); } op1->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass; /* Remember that this basic block contains 'new' of an sd array */ block->bbFlags |= BBF_HAS_NEWARRAY; optMethodFlags |= OMF_HAS_NEWARRAY; /* Push the result of the call on the stack */ impPushOnStack(op1, tiRetVal); callTyp = TYP_REF; } break; case CEE_LOCALLOC: // We don't allow locallocs inside handlers if (block->hasHndIndex()) { BADCODE("Localloc can't be inside handler"); } // Get the size to allocate op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); 
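                // A typical source pattern reaching here is a C# stackalloc, e.g. something
                // like "byte* p = stackalloc byte[64];". When the size folds to a small
                // constant and the block is not in a loop, the logic below converts the
                // localloc into an ordinary TYP_BLK local instead of a GT_LCLHEAP node.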
if (verCurrentState.esStackDepth != 0) { BADCODE("Localloc can only be used when the stack is empty"); } // If the localloc is not in a loop and its size is a small constant, // create a new local var of TYP_BLK and return its address. { bool convertedToLocal = false; // Need to aggressively fold here, as even fixed-size locallocs // will have casts in the way. op2 = gtFoldExpr(op2); if (op2->IsIntegralConst()) { const ssize_t allocSize = op2->AsIntCon()->IconValue(); bool bbInALoop = impBlockIsInALoop(block); if (allocSize == 0) { // Result is nullptr JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n"); op1 = gtNewIconNode(0, TYP_I_IMPL); convertedToLocal = true; } else if ((allocSize > 0) && !bbInALoop) { // Get the size threshold for local conversion ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE; #ifdef DEBUG // Optionally allow this to be modified maxSize = JitConfig.JitStackAllocToLocalSize(); #endif // DEBUG if (allocSize <= maxSize) { const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal")); JITDUMP("Converting stackalloc of %zd bytes to new local V%02u\n", allocSize, stackallocAsLocal); lvaTable[stackallocAsLocal].lvType = TYP_BLK; lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize; lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true; op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK); op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1); convertedToLocal = true; if (!this->opts.compDbgEnC) { // Ensure we have stack security for this method. // Reorder layout since the converted localloc is treated as an unsafe buffer. setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; } } } } if (!convertedToLocal) { // Bail out if inlining and the localloc was not converted. // // Note we might consider allowing the inline, if the call // site is not in a loop. if (compIsForInlining()) { InlineObservation obs = op2->IsIntegralConst() ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN; compInlineResult->NoteFatal(obs); return; } op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2); // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd. op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE); // Ensure we have stack security for this method. setNeedsGSSecurityCookie(); /* The FP register may not be back to the original value at the end of the method, even if the frame size is 0, as localloc may have modified it. So we will HAVE to reset it */ compLocallocUsed = true; } else { compLocallocOptimized = true; } } impPushOnStack(op1, tiRetVal); break; case CEE_ISINST: { /* Get the type token */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? 
opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the isinstanceof_any call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Perform the 'is instance' check on the input object // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false, opcodeOffs); } if (compDonotInline()) { return; } impPushOnStack(op1, tiRetVal); } break; } case CEE_REFANYVAL: // get the class handle and make a ICON node out of it _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); // Call helper GETREFANY(classHandle, op1); op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, gtNewCallArgs(op2, op1)); impPushOnStack(op1, tiRetVal); break; case CEE_REFANYTYPE: op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); if (op1->gtOper == GT_OBJ) { // Get the address of the refany op1 = op1->AsOp()->gtOp1; // Fetch the type from the correct slot op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL)); op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1); } else { assertImp(op1->gtOper == GT_MKREFANY); // The pointer may have side-effects if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT) { impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); #ifdef DEBUG impNoteLastILoffs(); #endif } // We already have the class handle op1 = op1->AsOp()->gtOp2; } // convert native TypeHandle to RuntimeTypeHandle { GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT, helperArgs); CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass(); // The handle struct is returned in register op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); op1->AsCall()->gtRetClsHnd = classHandle; #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, classHandle, op1->AsCall()->GetUnmanagedCallConv()); #endif tiRetVal = typeInfo(TI_STRUCT, classHandle); } impPushOnStack(op1, tiRetVal); break; case CEE_LDTOKEN: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); lastLoadToken = codeAddr; _impResolveToken(CORINFO_TOKENKIND_Ldtoken); tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken); op1 = impTokenToHandle(&resolvedToken, nullptr, true); if (op1 == nullptr) { // compDonotInline() return; } helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE; assert(resolvedToken.hClass != nullptr); if (resolvedToken.hMethod != nullptr) { helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD; } else if (resolvedToken.hField != nullptr) { helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD; } GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs); // The handle struct is returned in register and // it could be 
consumed both as `TYP_STRUCT` and `TYP_REF`. op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, tokenType, op1->AsCall()->GetUnmanagedCallConv()); #endif op1->AsCall()->gtRetClsHnd = tokenType; tiRetVal = verMakeTypeInfo(tokenType); impPushOnStack(op1, tiRetVal); } break; case CEE_UNBOX: case CEE_UNBOX_ANY: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); bool runtimeLookup; op2 = impTokenToHandle(&resolvedToken, &runtimeLookup); if (op2 == nullptr) { assert(compDonotInline()); return; } // Run this always so we can get access exceptions even with SkipVerification. accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n"); op1 = impPopStack().val; goto CASTCLASS; } /* Pop the object and create the unbox helper call */ /* You might think that for UNBOX_ANY we need to push a different */ /* (non-byref) type, but here we're making the tiRetVal that is used */ /* for the intermediate pointer which we then transfer onto the OBJ */ /* instruction. OBJ then creates the appropriate tiRetVal. */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass); assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE); // Check legality and profitability of inline expansion for unboxing. const bool canExpandInline = (helper == CORINFO_HELP_UNBOX); const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled(); if (canExpandInline && shouldExpandInline) { // See if we know anything about the type of op1, the object being unboxed. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(op1, &isExact, &isNonNull); // We can skip the "exact" bit here as we are comparing to a value class. // compareTypesForEquality should bail on comparisions for shared value classes. if (clsHnd != NO_CLASS_HANDLE) { const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(resolvedToken.hClass, clsHnd); if (compare == TypeCompareState::Must) { JITDUMP("\nOptimizing %s (%s) -- type test will succeed\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", eeGetClassName(clsHnd)); // For UNBOX, null check (if necessary), and then leave the box payload byref on the stack. 
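                        // Roughly, the tree built below for this case is
                        //
                        //     COMMA(NULLCHECK(box), ADD(box, TARGET_POINTER_SIZE))
                        //
                        // i.e. a byref to the box payload just past the method table pointer.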
if (opcode == CEE_UNBOX) { GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("optimized unbox clone")); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, boxPayloadOffset); GenTree* nullcheck = gtNewNullCheck(op1, block); GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress); impPushOnStack(result, tiRetVal); break; } // For UNBOX.ANY load the struct from the box payload byref (the load will nullcheck) assert(opcode == CEE_UNBOX_ANY); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, op1, boxPayloadOffset); impPushOnStack(boxPayloadAddress, tiRetVal); oper = GT_OBJ; goto OBJ; } else { JITDUMP("\nUnable to optimize %s -- can't resolve type comparison\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); } } else { JITDUMP("\nUnable to optimize %s -- class for [%06u] not known\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", dspTreeID(op1)); } JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); // we are doing normal unboxing // inline the common case of the unbox helper // UNBOX(exp) morphs into // clone = pop(exp); // ((*clone == typeToken) ? nop : helper(clone, typeToken)); // push(clone + TARGET_POINTER_SIZE) // GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone1")); op1 = gtNewMethodTableLookup(op1); GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2); op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone2")); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = gtNewHelperCallNode(helper, TYP_VOID, gtNewCallArgs(op2, op1)); op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1); op1 = gtNewQmarkNode(TYP_VOID, condBox, op1->AsColon()); // QMARK nodes cannot reside on the evaluation stack. Because there // may be other trees on the evaluation stack that side-effect the // sources of the UNBOX operation we must spill the stack. impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Create the address-expression to reference past the object header // to the beginning of the value-type. Today this means adjusting // past the base of the objects vtable field which is pointer sized. op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2); } else { JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal"); // Don't optimize, just call the helper and be done with it op1 = gtNewHelperCallNode(helper, (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), gtNewCallArgs(op2, op1)); if (op1->gtType == TYP_STRUCT) { op1->AsCall()->gtRetClsHnd = resolvedToken.hClass; } } assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref. (helper == CORINFO_HELP_UNBOX_NULLABLE && varTypeIsStruct(op1)) // UnboxNullable helper returns a struct. 
); /* ---------------------------------------------------------------------- | \ helper | | | | \ | | | | \ | CORINFO_HELP_UNBOX | CORINFO_HELP_UNBOX_NULLABLE | | \ | (which returns a BYREF) | (which returns a STRUCT) | | | opcode \ | | | |--------------------------------------------------------------------- | UNBOX | push the BYREF | spill the STRUCT to a local, | | | | push the BYREF to this local | |--------------------------------------------------------------------- | UNBOX_ANY | push a GT_OBJ of | push the STRUCT | | | the BYREF | For Linux when the | | | | struct is returned in two | | | | registers create a temp | | | | which address is passed to | | | | the unbox_nullable helper. | |--------------------------------------------------------------------- */ if (opcode == CEE_UNBOX) { if (helper == CORINFO_HELP_UNBOX_NULLABLE) { // Unbox nullable helper returns a struct type. // We need to spill it to a temp so than can take the address of it. // Here we need unsafe value cls check, since the address of struct is taken to be used // further along and potetially be exploitable. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable")); lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); } assert(op1->gtType == TYP_BYREF); } else { assert(opcode == CEE_UNBOX_ANY); if (helper == CORINFO_HELP_UNBOX) { // Normal unbox helper returns a TYP_BYREF. impPushOnStack(op1, tiRetVal); oper = GT_OBJ; goto OBJ; } assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!"); #if FEATURE_MULTIREG_RET if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass, CorInfoCallConvExtension::Managed)) { // Unbox nullable helper returns a TYP_STRUCT. // For the multi-reg case we need to spill it to a temp so that // we can pass the address to the unbox_nullable jit helper. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable")); lvaTable[tmp].lvIsMultiRegArg = true; lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); // In this case the return value of the unbox helper is TYP_BYREF. // Make sure the right type is placed on the operand type stack. impPushOnStack(op1, tiRetVal); // Load the struct. oper = GT_OBJ; assert(op1->gtType == TYP_BYREF); goto OBJ; } else #endif // !FEATURE_MULTIREG_RET { // If non register passable struct we have it materialized in the RetBuf. 
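// Hypothetical C# source that would typically land on this path (assuming the Nullable<T>
// payload is too large to be returned in registers on the target):
//   object o = ...;
//   decimal? d = (decimal?)o;   // unbox.any Nullable<decimal> -> CORINFO_HELP_UNBOX_NULLABLE via RetBuf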
assert(op1->gtType == TYP_STRUCT); tiRetVal = verMakeTypeInfo(resolvedToken.hClass); assert(tiRetVal.IsValueClass()); } } impPushOnStack(op1, tiRetVal); } break; case CEE_BOX: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Box); JITDUMP(" %08X", resolvedToken.token); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); // Note BOX can be used on things that are not value classes, in which // case we get a NOP. However the verifier's view of the type on the // stack changes (in generic code a 'T' becomes a 'boxed T') if (!eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing BOX(refClass) as NOP\n"); verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal; break; } // Look ahead for box idioms int matched = impBoxPatternMatch(&resolvedToken, codeAddr + sz, codeEndp); if (matched >= 0) { // Skip the matched IL instructions sz += matched; break; } impImportAndPushBox(&resolvedToken); if (compDonotInline()) { return; } } break; case CEE_SIZEOF: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass)); impPushOnStack(op1, tiRetVal); break; case CEE_CASTCLASS: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; /* Pop the address and create the 'checked cast' helper call */ // At this point we expect typeRef to contain the token, op1 to contain the value being cast, // and op2 to contain code that creates the type handle corresponding to typeRef CASTCLASS: { GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the chkcastany call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Check the object on the stack for the type-cast // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true, opcodeOffs); } if (compDonotInline()) { return; } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); } } break; case CEE_THROW: // Any block with a throw is rarely executed. 
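// Sketch of the import: `throw ex` becomes a call to the throw helper,
//   CALL CORINFO_HELP_THROW(ex)
// and anything left on the evaluation stack is flushed as side effects (EVAL_APPEND)
// before the call is appended, since control never returns to this point.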
block->bbSetRunRarely(); // Pop the exception object and create the 'throw' helper call op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewCallArgs(impPopStack().val)); // Fall through to clear out the eval stack. EVAL_APPEND: if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); goto APPEND; case CEE_RETHROW: assert(!compIsForInlining()); if (info.compXcptnsCount == 0) { BADCODE("rethrow outside catch"); } /* Create the 'rethrow' helper call */ op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID); goto EVAL_APPEND; case CEE_INITOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = gtNewIconNode(0); // Value op1 = impPopStack().val; // Dest if (eeIsValueClass(resolvedToken.hClass)) { op1 = gtNewStructVal(resolvedToken.hClass, op1); if (op1->OperIs(GT_OBJ)) { gtSetObjGcInfo(op1->AsObj()); } } else { size = info.compCompHnd->getClassSize(resolvedToken.hClass); assert(size == TARGET_POINTER_SIZE); op1 = gtNewBlockVal(op1, size); } op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); goto SPILL_APPEND; case CEE_INITBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Value op1 = impPopStack().val; // Dst addr if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); } else { if (!op2->IsIntegralConst(0)) { op2 = gtNewOperNode(GT_INIT_VAL, TYP_INT, op2); } op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Src addr op1 = impPopStack().val; // Dst addr if (op2->OperGet() == GT_ADDR) { op2 = op2->AsOp()->gtOp1; } else { op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2); } if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, true); } else { op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (!eeIsValueClass(resolvedToken.hClass)) { op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; impPushOnStack(op1, typeInfo()); opcode = CEE_STIND_REF; lclTyp = TYP_REF; goto STIND_POST_VERIFY; } op2 = impPopStack().val; // Src op1 = impPopStack().val; // Dest op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0)); goto SPILL_APPEND; case CEE_STOBJ: { assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; } compUnsafeCastUsed = true; if (lclTyp == TYP_REF) { opcode = CEE_STIND_REF; goto STIND_POST_VERIFY; } CorInfoType jitTyp = 
info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { lclTyp = JITtype2varType(jitTyp); goto STIND_POST_VERIFY; } op2 = impPopStack().val; // Value op1 = impPopStack().val; // Ptr assertImp(varTypeIsStruct(op2)); op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED)) { op1->gtFlags |= GTF_BLK_UNALIGNED; } goto SPILL_APPEND; } case CEE_MKREFANY: assert(!compIsForInlining()); // Being lazy here. Refanys are tricky in terms of gc tracking. // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany. JITDUMP("disabling struct promotion because of mkrefany\n"); fgNoStructPromotion = true; oper = GT_MKREFANY; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken, nullptr, true); if (op2 == nullptr) { // compDonotInline() return; } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec. // But JIT32 allowed it, so we continue to allow it. assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT); // MKREFANY returns a struct. op2 is the class token. op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2); impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass())); break; case CEE_LDOBJ: { oper = GT_OBJ; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); OBJ: tiRetVal = verMakeTypeInfo(resolvedToken.hClass); compUnsafeCastUsed = true; if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; opcode = CEE_LDIND_REF; goto LDIND_POST_VERIFY; } op1 = impPopStack().val; assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL); CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1); // Could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF; assertImp(varTypeIsArithmetic(op1->gtType)); } else { // OBJ returns a struct // and an inline argument which is the class token of the loaded obj op1 = gtNewObjNode(resolvedToken.hClass, op1); } op1->gtFlags |= GTF_EXCEPT; if (prefixFlags & PREFIX_UNALIGNED) { op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; } case CEE_LDLEN: op1 = impPopStack().val; if (opts.OptimizationEnabled()) { /* Use GT_ARR_LENGTH operator so rng check opts see this */ GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length, block); op1 = arrLen; } else { /* Create the expression "*(array_addr + ArrLenOffs)" */ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL)); op1 = gtNewIndir(TYP_INT, op1); } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); break; case CEE_BREAK: op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID); goto SPILL_APPEND; case CEE_NOP: if (opts.compDbgCode) { op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); goto SPILL_APPEND; } break; /******************************** NYI *******************************/ case 0xCC: OutputDebugStringA("CLR: Invalid x86 
breakpoint in IL stream\n"); FALLTHROUGH; case CEE_ILLEGAL: case CEE_MACRO_END: default: if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); return; } BADCODE3("unknown opcode", ": %02X", (int)opcode); } codeAddr += sz; prevOpcode = opcode; prefixFlags = 0; } return; #undef _impResolveToken } #ifdef _PREFAST_ #pragma warning(pop) #endif // Push a local/argument treeon the operand stack void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal) { tiRetVal.NormaliseForStack(); if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr()) { tiRetVal.SetUninitialisedObjRef(); } impPushOnStack(op, tiRetVal); } //------------------------------------------------------------------------ // impCreateLocal: create a GT_LCL_VAR node to access a local that might need to be normalized on load // // Arguments: // lclNum -- The index into lvaTable // offset -- The offset to associate with the node // // Returns: // The node // GenTreeLclVar* Compiler::impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)) { var_types lclTyp; if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } return gtNewLclvNode(lclNum, lclTyp DEBUGARG(offset)); } // Load a local/argument on the operand stack // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal) { impPushVar(impCreateLocalNode(lclNum DEBUGARG(offset)), tiRetVal); } // Load an argument on the operand stack // Shared by the various CEE_LDARG opcodes // ilArgNum is the argument index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset) { Verify(ilArgNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { if (ilArgNum >= info.compArgsCount) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER); return; } impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo), impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo); } else { if (ilArgNum >= info.compArgsCount) { BADCODE("Bad IL"); } unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } impLoadVar(lclNum, offset); } } // Load a local on the operand stack // Shared by the various CEE_LDLOC opcodes // ilLclNum is the local index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset) { if (compIsForInlining()) { if (ilLclNum >= info.compMethodInfo->locals.numArgs) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER); return; } // Get the local type var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo; typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo; /* Have we allocated a temp for this local? 
*/ unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp")); // All vars of inlined methods should be !lvNormalizeOnLoad() assert(!lvaTable[lclNum].lvNormalizeOnLoad()); lclTyp = genActualType(lclTyp); impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal); } else { if (ilLclNum >= info.compMethodInfo->locals.numArgs) { BADCODE("Bad IL"); } unsigned lclNum = info.compArgsCount + ilLclNum; impLoadVar(lclNum, offset); } } #ifdef TARGET_ARM /************************************************************************************** * * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the * dst struct, because struct promotion will turn it into a float/double variable while * the rhs will be an int/long variable. We don't code generate assignment of int into * a float, but there is nothing that might prevent us from doing so. The tree however * would like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int)) * * tmpNum - the lcl dst variable num that is a struct. * src - the src tree assigned to the dest that is a struct/int (when varargs call.) * hClass - the type handle for the struct variable. * * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play, * however, we could do a codegen of transferring from int to float registers * (transfer, not a cast.) * */ void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass) { if (src->gtOper == GT_CALL && src->AsCall()->IsVarargs() && IsHfa(hClass)) { int hfaSlots = GetHfaCount(hClass); var_types hfaType = GetHfaType(hClass); // If we have varargs we morph the method's return type to be "int" irrespective of its original // type: struct/float at importer because the ABI calls out return in integer registers. // We don't want struct promotion to replace an expression like this: // lclFld_int = callvar_int() into lclFld_float = callvar_int(); // This means an int is getting assigned to a float without a cast. Prevent the promotion. if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) || (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES)) { // Make sure this struct type stays as struct so we can receive the call in a struct. lvaTable[tmpNum].lvIsMultiRegRet = true; } } } #endif // TARGET_ARM #if FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impAssignMultiRegTypeToVar: ensure calls that return structs in multiple // registers return values to suitable temps. // // Arguments: // op -- call returning a struct in registers // hClass -- class handle for struct // // Returns: // Tree with reference to struct local to use as call return value. GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return")); impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL); GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType); // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. ret->gtFlags |= GTF_DONT_CSE; assert(IsMultiRegReturnedType(hClass, callConv)); // Mark the var so that fields are not promoted and stay together. 
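// e.g. on SysV x64 a two-register struct return (RAX/RDX) is first assigned to this temp;
// keeping the temp's fields unpromoted lets the pair of return registers be stored as a unit
// (a sketch of the motivation, not an exhaustive description).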
lvaTable[tmpNum].lvIsMultiRegRet = true; return ret; } #endif // FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impReturnInstruction: import a return or an explicit tail call // // Arguments: // prefixFlags -- active IL prefixes // opcode -- [in, out] IL opcode // // Returns: // True if import was successful (may fail for some inlinees) // bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) { const bool isTailCall = (prefixFlags & PREFIX_TAILCALL) != 0; #ifdef DEBUG // If we are importing an inlinee and have GC ref locals we always // need to have a spill temp for the return value. This temp // should have been set up in advance, over in fgFindBasicBlocks. if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID)) { assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM); } #endif // DEBUG GenTree* op2 = nullptr; GenTree* op1 = nullptr; CORINFO_CLASS_HANDLE retClsHnd = nullptr; if (info.compRetType != TYP_VOID) { StackEntry se = impPopStack(); retClsHnd = se.seTypeInfo.GetClassHandle(); op2 = se.val; if (!compIsForInlining()) { impBashVarAddrsToI(op2); op2 = impImplicitIorI4Cast(op2, info.compRetType); op2 = impImplicitR4orR8Cast(op2, info.compRetType); // Note that we allow TYP_I_IMPL<->TYP_BYREF transformation, but only TYP_I_IMPL<-TYP_REF. assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) || ((op2->TypeGet() == TYP_I_IMPL) && TypeIs(info.compRetType, TYP_BYREF)) || (op2->TypeIs(TYP_BYREF, TYP_REF) && (info.compRetType == TYP_I_IMPL)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) || (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType))); #ifdef DEBUG if (!isTailCall && opts.compGcChecks && (info.compRetType == TYP_REF)) { // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with // one-return BB. assert(op2->gtType == TYP_REF); // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTreeCall::Use* args = gtNewCallArgs(op2); op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op2); } } #endif } else { if (verCurrentState.esStackDepth != 0) { assert(compIsForInlining()); JITDUMP("CALLSITE_COMPILATION_ERROR: inlinee's stack is not empty."); compInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); return false; } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (before normalization) =>\n"); gtDispTree(op2); } #endif // Make sure the type matches the original call. var_types returnType = genActualType(op2->gtType); var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType; if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT)) { originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass); } if (returnType != originalCallType) { // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa. // Allow TYP_REF to be returned as TYP_I_IMPL and NOT vice verse. 
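// Concrete example of a tolerated mismatch: an inlinee declared to return IntPtr (TYP_I_IMPL)
// whose return expression is a TYP_BYREF. The reverse byref<->native-int direction is also
// accepted, whereas e.g. a TYP_REF value flowing into a TYP_BYREF slot is not.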
if ((TypeIs(returnType, TYP_BYREF, TYP_REF) && (originalCallType == TYP_I_IMPL)) || ((returnType == TYP_I_IMPL) && TypeIs(originalCallType, TYP_BYREF))) { JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); } else { JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH); return false; } } // Below, we are going to set impInlineInfo->retExpr to the tree with the return // expression. At this point, retExpr could already be set if there are multiple // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of // the other blocks already set it. If there is only a single return block, // retExpr shouldn't be set. However, this is not true if we reimport a block // with a return. In that case, retExpr will be set, then the block will be // reimported, but retExpr won't get cleared as part of setting the block to // be reimported. The reimported retExpr value should be the same, so even if // we don't unconditionally overwrite it, it shouldn't matter. if (info.compRetNativeType != TYP_STRUCT) { // compRetNativeType is not TYP_STRUCT. // This implies it could be either a scalar type or SIMD vector type or // a struct type that can be normalized to a scalar type. if (varTypeIsStruct(info.compRetType)) { noway_assert(info.compRetBuffArg == BAD_VAR_NUM); // adjust the type away from struct to integral // and no normalizing op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); } else { // Do we have to normalize? var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType); if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) && fgCastNeeded(op2, fncRealRetType)) { // Small-typed return values are normalized by the callee op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType); } } if (fgNeedReturnSpillTemp()) { assert(info.compRetNativeType != TYP_VOID && (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals())); // If this method returns a ref type, track the actual types seen // in the returns. if (info.compRetType == TYP_REF) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull); if (impInlineInfo->retExpr == nullptr) { // This is the first return, so best known type is the type // of this return value. impInlineInfo->retExprClassHnd = returnClsHnd; impInlineInfo->retExprClassHndIsExact = isExact; } else if (impInlineInfo->retExprClassHnd != returnClsHnd) { // This return site type differs from earlier seen sites, // so reset the info and we'll fall back to using the method's // declared return type for the return spill temp. impInlineInfo->retExprClassHnd = nullptr; impInlineInfo->retExprClassHndIsExact = false; } } impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType; GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType); op2 = tmpOp2; #ifdef DEBUG if (impInlineInfo->retExpr) { // Some other block(s) have seen the CEE_RET first. // Better they spilled to the same temp. 
assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR); assert(impInlineInfo->retExpr->AsLclVarCommon()->GetLclNum() == op2->AsLclVarCommon()->GetLclNum()); } #endif } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (after normalization) =>\n"); gtDispTree(op2); } #endif // Report the return expression impInlineInfo->retExpr = op2; } else { // compRetNativeType is TYP_STRUCT. // This implies that struct return via RetBuf arg or multi-reg struct return GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall(); // Assign the inlinee return into a spill temp. // spill temp only exists if there are multiple return points if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM) { // in this case we have to insert multiple struct copies to the temp // and the retexpr is just the temp. assert(info.compRetNativeType != TYP_VOID); assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()); impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); } #if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) #if defined(TARGET_ARM) // TODO-ARM64-NYI: HFA // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the // next ifdefs could be refactored in a single method with the ifdef inside. if (IsHfa(retClsHnd)) { // Same as !IsHfa but just don't bother with impAssignStructPtr. #else // defined(UNIX_AMD64_ABI) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { // If single eightbyte, the return type would have been normalized and there won't be a temp var. // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes - // max allowed.) assert(retRegCount == MAX_RET_REG_COUNT); // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr. CLANG_FORMAT_COMMENT_ANCHOR; #endif // defined(UNIX_AMD64_ABI) if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { #if defined(TARGET_ARM) impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType); #else // defined(UNIX_AMD64_ABI) // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); #endif // defined(UNIX_AMD64_ABI) } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_ARM64) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount >= 2); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_X86) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount == MAX_RET_REG_COUNT); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. 
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #endif // defined(TARGET_ARM64) { assert(iciCall->HasRetBufArg()); GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->GetNode()); // spill temp only exists if there are multiple return points if (fgNeedReturnSpillTemp()) { // if this is the first return we have seen set the retExpr if (!impInlineInfo->retExpr) { impInlineInfo->retExpr = impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType), retClsHnd, (unsigned)CHECK_SPILL_ALL); } } else { impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); } } } if (impInlineInfo->retExpr != nullptr) { impInlineInfo->retBB = compCurBB; } } } if (compIsForInlining()) { return true; } if (info.compRetType == TYP_VOID) { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } else if (info.compRetBuffArg != BAD_VAR_NUM) { // Assign value to return buff (first param) GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset())); op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX). CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // x64 (System V and Win64) calling convention requires to // return the implicit return buffer explicitly (in RAX). // Change the return type to be BYREF. op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); #else // !defined(TARGET_AMD64) // In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX). // In such case the return value of the function is changed to BYREF. // If profiler hook is not needed the return type of the function is TYP_VOID. if (compIsProfilerHookNeeded()) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #if defined(TARGET_ARM64) // On ARM64, the native instance calling convention variant // requires the implicit ByRef to be explicitly returned. else if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif #if defined(TARGET_X86) else if (info.compCallConv != CorInfoCallConvExtension::Managed) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif else { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } #endif // !defined(TARGET_AMD64) } else if (varTypeIsStruct(info.compRetType)) { #if !FEATURE_MULTIREG_RET // For both ARM architectures the HFA native types are maintained as structs. // Also on System V AMD64 the multireg structs returns are also left as structs. 
noway_assert(info.compRetNativeType != TYP_STRUCT); #endif op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); // return op2 var_types returnType = info.compRetType; op1 = gtNewOperNode(GT_RETURN, genActualType(returnType), op2); } else { // return op2 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2); } // We must have imported a tailcall and jumped to RET if (isTailCall) { assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode)); opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES // impImportCall() would have already appended TYP_VOID calls if (info.compRetType == TYP_VOID) { return true; } } impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif return true; } /***************************************************************************** * Mark the block as unimported. * Note that the caller is responsible for calling impImportBlockPending(), * with the appropriate stack-state */ inline void Compiler::impReimportMarkBlock(BasicBlock* block) { #ifdef DEBUG if (verbose && (block->bbFlags & BBF_IMPORTED)) { printf("\n" FMT_BB " will be reimported\n", block->bbNum); } #endif block->bbFlags &= ~BBF_IMPORTED; } /***************************************************************************** * Mark the successors of the given block as unimported. * Note that the caller is responsible for calling impImportBlockPending() * for all the successors, with the appropriate stack-state. */ void Compiler::impReimportMarkSuccessors(BasicBlock* block) { for (BasicBlock* const succBlock : block->Succs()) { impReimportMarkBlock(succBlock); } } /***************************************************************************** * * Filter wrapper to handle only passed in exception code * from it). */ LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam) { if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION) { return EXCEPTION_EXECUTE_HANDLER; } return EXCEPTION_CONTINUE_SEARCH; } void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart) { assert(block->hasTryIndex()); assert(!compIsForInlining()); unsigned tryIndex = block->getTryIndex(); EHblkDsc* HBtab = ehGetDsc(tryIndex); if (isTryStart) { assert(block->bbFlags & BBF_TRY_BEG); // The Stack must be empty // if (block->bbStkDepth != 0) { BADCODE("Evaluation stack must be empty on entry into a try block"); } } // Save the stack contents, we'll need to restore it later // SavedStack blockState; impSaveStackState(&blockState, false); while (HBtab != nullptr) { if (isTryStart) { // Are we verifying that an instance constructor properly initializes it's 'this' pointer once? // We do not allow the 'this' pointer to be uninitialized when entering most kinds try regions // if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init)) { // We trigger an invalid program exception here unless we have a try/fault region. // if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter()) { BADCODE( "The 'this' pointer of an instance constructor is not intialized upon entry to a try region"); } else { // Allow a try/fault region to proceed. assert(HBtab->HasFaultHandler()); } } } // Recursively process the handler block, if we haven't already done so. 
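// A catch handler begins with exactly one item on its entry stack: the caught exception
// object (typed from the clause token, or as Object for a filter), while finally/fault
// handlers start with an empty stack. So the code below resets the stack, pushes that
// object when appropriate (spilling it to a temp if necessary), and then queues the
// handler block for import.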
BasicBlock* hndBegBB = HBtab->ebdHndBeg; if (((hndBegBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(hndBegBB) == 0)) { // Construct the proper verification stack state // either empty or one that contains just // the Exception Object that we are dealing with // verCurrentState.esStackDepth = 0; if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp)) { CORINFO_CLASS_HANDLE clsHnd; if (HBtab->HasFilter()) { clsHnd = impGetObjectClass(); } else { CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = HBtab->ebdTyp; resolvedToken.tokenType = CORINFO_TOKENKIND_Class; info.compCompHnd->resolveToken(&resolvedToken); clsHnd = resolvedToken.hClass; } // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdHndBeg! hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false); } // Queue up the handler for importing // impImportBlockPending(hndBegBB); } // Process the filter block, if we haven't already done so. if (HBtab->HasFilter()) { /* @VERIFICATION : Ideally the end of filter state should get propagated to the catch handler, this is an incompleteness, but is not a security/compliance issue, since the only interesting state is the 'thisInit' state. */ BasicBlock* filterBB = HBtab->ebdFilter; if (((filterBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(filterBB) == 0)) { verCurrentState.esStackDepth = 0; // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdFilter! const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB); filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter); impImportBlockPending(filterBB); } } // This seems redundant ....?? if (verTrackObjCtorInitState && HBtab->HasFaultHandler()) { /* Recursively process the handler block */ verCurrentState.esStackDepth = 0; // Queue up the fault handler for importing // impImportBlockPending(HBtab->ebdHndBeg); } // Now process our enclosing try index (if any) // tryIndex = HBtab->ebdEnclosingTryIndex; if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { HBtab = nullptr; } else { HBtab = ehGetDsc(tryIndex); } } // Restore the stack contents impRestoreStackState(&blockState); } //*************************************************************** // Import the instructions for the given basic block. Perform // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first // time, or whose verification pre-state is changed. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif void Compiler::impImportBlock(BasicBlock* block) { // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to // handle them specially. In particular, there is no IL to import for them, but we do need // to mark them as imported and put their successors on the pending import list. 
if (block->bbFlags & BBF_INTERNAL) { JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum); block->bbFlags |= BBF_IMPORTED; for (BasicBlock* const succBlock : block->Succs()) { impImportBlockPending(succBlock); } return; } bool markImport; assert(block); /* Make the block globaly available */ compCurBB = block; #ifdef DEBUG /* Initialize the debug variables */ impCurOpcName = "unknown"; impCurOpcOffs = block->bbCodeOffs; #endif /* Set the current stack state to the merged result */ verResetCurrentState(block, &verCurrentState); /* Now walk the code and import the IL into GenTrees */ struct FilterVerificationExceptionsParam { Compiler* pThis; BasicBlock* block; }; FilterVerificationExceptionsParam param; param.pThis = this; param.block = block; PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param) { /* @VERIFICATION : For now, the only state propagation from try to it's handler is "thisInit" state (stack is empty at start of try). In general, for state that we track in verification, we need to model the possibility that an exception might happen at any IL instruction, so we really need to merge all states that obtain between IL instructions in a try block into the start states of all handlers. However we do not allow the 'this' pointer to be uninitialized when entering most kinds try regions (only try/fault are allowed to have an uninitialized this pointer on entry to the try) Fortunately, the stack is thrown away when an exception leads to a handler, so we don't have to worry about that. We DO, however, have to worry about the "thisInit" state. But only for the try/fault case. The only allowed transition is from TIS_Uninit to TIS_Init. So for a try/fault region for the fault handler block we will merge the start state of the try begin and the post-state of each block that is part of this try region */ // merge the start state of the try begin // if (pParam->block->bbFlags & BBF_TRY_BEG) { pParam->pThis->impVerifyEHBlock(pParam->block, true); } pParam->pThis->impImportBlockCode(pParam->block); // As discussed above: // merge the post-state of each block that is part of this try region // if (pParam->block->hasTryIndex()) { pParam->pThis->impVerifyEHBlock(pParam->block, false); } } PAL_EXCEPT_FILTER(FilterVerificationExceptions) { verHandleVerificationFailure(block DEBUGARG(false)); } PAL_ENDTRY if (compDonotInline()) { return; } assert(!compDonotInline()); markImport = false; SPILLSTACK: unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks bool reimportSpillClique = false; BasicBlock* tgtBlock = nullptr; /* If the stack is non-empty, we might have to spill its contents */ if (verCurrentState.esStackDepth != 0) { impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something // on the stack, its lifetime is hard to determine, simply // don't reuse such temps. Statement* addStmt = nullptr; /* Do the successors of 'block' have any other predecessors ? We do not want to do some of the optimizations related to multiRef if we can reimport blocks */ unsigned multRef = impCanReimport ? unsigned(~0) : 0; switch (block->bbJumpKind) { case BBJ_COND: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_JTRUE); /* Note if the next block has more than one ancestor */ multRef |= block->bbNext->bbRefs; /* Does the next block have temps assigned? 
*/ baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; if (baseTmp != NO_BASE_TMP) { break; } /* Try the target of the jump then */ multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_ALWAYS: multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_NONE: multRef |= block->bbNext->bbRefs; baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; break; case BBJ_SWITCH: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_SWITCH); for (BasicBlock* const tgtBlock : block->SwitchTargets()) { multRef |= tgtBlock->bbRefs; // Thanks to spill cliques, we should have assigned all or none assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn)); baseTmp = tgtBlock->bbStkTempsIn; if (multRef > 1) { break; } } break; case BBJ_CALLFINALLY: case BBJ_EHCATCHRET: case BBJ_RETURN: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_THROW: NO_WAY("can't have 'unreached' end of BB with non-empty stack"); break; default: noway_assert(!"Unexpected bbJumpKind"); break; } assert(multRef >= 1); /* Do we have a base temp number? */ bool newTemps = (baseTmp == NO_BASE_TMP); if (newTemps) { /* Grab enough temps for the whole stack */ baseTmp = impGetSpillTmpBase(block); } /* Spill all stack entries into temps */ unsigned level, tempNum; JITDUMP("\nSpilling stack entries into temps\n"); for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++) { GenTree* tree = verCurrentState.esStack[level].val; /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from the other. This should merge to a byref in unverifiable code. However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the successor would be imported assuming there was a TYP_I_IMPL on the stack. Thus the value would not get GC-tracked. Hence, change the temp to TYP_BYREF and reimport the successors. Note: We should only allow this in unverifiable code. */ if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL) { lvaTable[tempNum].lvType = TYP_BYREF; impReimportMarkSuccessors(block); markImport = true; } #ifdef TARGET_64BIT if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "native int". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_I_IMPL; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL) { // Spill clique has decided this should be "native int", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } // Consider the case where one branch left a 'byref' on the stack and the other leaves // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64 // behavior instead of asserting and then generating bad code (where we save/restore the // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been // imported already, we need to change the type of the local and reimport the spill clique. 
// If the 'byref' side has imported, we insert a cast from int to 'native int' to match // the 'byref' size. if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "byref". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_BYREF; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF) { // Spill clique has decided this should be "byref", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique size. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } #endif // TARGET_64BIT if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT) { // Some other block in the spill clique set this to "float", but now we have "double". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_DOUBLE; reimportSpillClique = true; } else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE) { // Spill clique has decided this should be "double", but this block only pushes a "float". // Insert a cast to "double" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE); } /* If addStmt has a reference to tempNum (can only happen if we are spilling to the temps already used by a previous block), we need to spill addStmt */ if (addStmt != nullptr && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum)) { GenTree* addTree = addStmt->GetRootNode(); if (addTree->gtOper == GT_JTRUE) { GenTree* relOp = addTree->AsOp()->gtOp1; assert(relOp->OperIsCompare()); var_types type = genActualType(relOp->AsOp()->gtOp1->TypeGet()); if (gtHasRef(relOp->AsOp()->gtOp1, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1")); impAssignTempGen(temp, relOp->AsOp()->gtOp1, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type); } if (gtHasRef(relOp->AsOp()->gtOp2, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2")); impAssignTempGen(temp, relOp->AsOp()->gtOp2, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type); } } else { assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet())); unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH")); impAssignTempGen(temp, addTree->AsOp()->gtOp1, level); addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet())); } } /* Spill the stack entry, and replace with the temp */ if (!impSpillStackEntry(level, tempNum #ifdef DEBUG , true, "Spill Stack Entry" #endif )) { if (markImport) { BADCODE("bad stack state"); } // Oops. Something went wrong when spilling. Bad code. 
verHandleVerificationFailure(block DEBUGARG(true)); goto SPILLSTACK; } } /* Put back the 'jtrue'/'switch' if we removed it earlier */ if (addStmt != nullptr) { impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE); } } // Some of the append/spill logic works on compCurBB assert(compCurBB == block); /* Save the tree list in the block */ impEndTreeList(block); // impEndTreeList sets BBF_IMPORTED on the block // We do *NOT* want to set it later than this because // impReimportSpillClique might clear it if this block is both a // predecessor and successor in the current spill clique assert(block->bbFlags & BBF_IMPORTED); // If we had a int/native int, or float/double collision, we need to re-import if (reimportSpillClique) { // This will re-import all the successors of block (as well as each of their predecessors) impReimportSpillClique(block); // For blocks that haven't been imported yet, we still need to mark them as pending import. for (BasicBlock* const succ : block->Succs()) { if ((succ->bbFlags & BBF_IMPORTED) == 0) { impImportBlockPending(succ); } } } else // the normal case { // otherwise just import the successors of block /* Does this block jump to any other blocks? */ for (BasicBlock* const succ : block->Succs()) { impImportBlockPending(succ); } } } #ifdef _PREFAST_ #pragma warning(pop) #endif /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Merges the current verification state into the verification state of "block" // (its "pre-state"). void Compiler::impImportBlockPending(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum); } #endif // We will add a block to the pending set if it has not already been imported (or needs to be re-imported), // or if it has, but merging in a predecessor's post-state changes the block's pre-state. // (When we're doing verification, we always attempt the merge to detect verification errors.) // If the block has not been imported, add to pending set. bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0); // Initialize bbEntryState just the first time we try to add this block to the pending list // Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set // We use NULL to indicate the 'common' state to avoid memory allocation if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) && (impGetPendingBlockMember(block) == 0)) { verInitBBEntryState(block, &verCurrentState); assert(block->bbStkDepth == 0); block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth); assert(addToPending); assert(impGetPendingBlockMember(block) == 0); } else { // The stack should have the same height on entry to the block from all its predecessors. 
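// For example, IL that reaches a join point having pushed one value along one incoming edge
// and two values along another is rejected here (NO_WAY) rather than being imported with an
// inconsistent stack model.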
if (block->bbStkDepth != verCurrentState.esStackDepth) { #ifdef DEBUG char buffer[400]; sprintf_s(buffer, sizeof(buffer), "Block at offset %4.4x to %4.4x in %0.200s entered with different stack depths.\n" "Previous depth was %d, current depth is %d", block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth, verCurrentState.esStackDepth); buffer[400 - 1] = 0; NO_WAY(buffer); #else NO_WAY("Block entered with different stack depths"); #endif } if (!addToPending) { return; } if (block->bbStkDepth > 0) { // We need to fix the types of any spill temps that might have changed: // int->native int, float->double, int->byref, etc. impRetypeEntryStateTemps(block); } // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_Unknown) PendingDsc; } dsc->pdBB = block; dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth; dsc->pdThisPtrInit = verCurrentState.thisInitialized; // Save the stack trees for later if (verCurrentState.esStackDepth) { impSaveStackState(&dsc->pdSavedStack, false); } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us to now to consider the block as not imported (at least for // the final time...) block->bbFlags &= ~BBF_IMPORTED; #ifdef DEBUG if (verbose && 0) { printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum); } #endif } /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block. void Compiler::impReimportBlockPending(BasicBlock* block) { JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum); assert(block->bbFlags & BBF_IMPORTED); // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_ImpStack) PendingDsc; } dsc->pdBB = block; if (block->bbEntryState) { dsc->pdThisPtrInit = block->bbEntryState->thisInitialized; dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth; dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack; } else { dsc->pdThisPtrInit = TIS_Bottom; dsc->pdSavedStack.ssDepth = 0; dsc->pdSavedStack.ssTrees = nullptr; } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us to now to consider the block as not imported (at least for // the final time...) 
block->bbFlags &= ~BBF_IMPORTED; #ifdef DEBUG if (verbose && 0) { printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum); } #endif } void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp) { if (comp->impBlockListNodeFreeList == nullptr) { return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1); } else { BlockListNode* res = comp->impBlockListNodeFreeList; comp->impBlockListNodeFreeList = res->m_next; return res; } } void Compiler::FreeBlockListNode(Compiler::BlockListNode* node) { node->m_next = impBlockListNodeFreeList; impBlockListNodeFreeList = node; } void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback) { bool toDo = true; noway_assert(!fgComputePredsDone); if (!fgCheapPredsValid) { fgComputeCheapPreds(); } BlockListNode* succCliqueToDo = nullptr; BlockListNode* predCliqueToDo = new (this) BlockListNode(block); while (toDo) { toDo = false; // Look at the successors of every member of the predecessor to-do list. while (predCliqueToDo != nullptr) { BlockListNode* node = predCliqueToDo; predCliqueToDo = node->m_next; BasicBlock* blk = node->m_blk; FreeBlockListNode(node); for (BasicBlock* const succ : blk->Succs()) { // If it's not already in the clique, add it, and also add it // as a member of the successor "toDo" set. if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0) { callback->Visit(SpillCliqueSucc, succ); impSpillCliqueSetMember(SpillCliqueSucc, succ, 1); succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo); toDo = true; } } } // Look at the predecessors of every member of the successor to-do list. while (succCliqueToDo != nullptr) { BlockListNode* node = succCliqueToDo; succCliqueToDo = node->m_next; BasicBlock* blk = node->m_blk; FreeBlockListNode(node); for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next) { BasicBlock* predBlock = pred->block; // If it's not already in the clique, add it, and also add it // as a member of the predecessor "toDo" set. if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0) { callback->Visit(SpillCliquePred, predBlock); impSpillCliqueSetMember(SpillCliquePred, predBlock, 1); predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo); toDo = true; } } } } // If this fails, it means we didn't walk the spill clique properly and somehow managed // miss walking back to include the predecessor we started from. // This most likely cause: missing or out of date bbPreds assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0); } void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliqueSucc) { assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor. blk->bbStkTempsIn = m_baseTmp; } else { assert(predOrSucc == SpillCliquePred); assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor. blk->bbStkTempsOut = m_baseTmp; } } void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) { // For Preds we could be a little smarter and just find the existing store // and re-type it/add a cast, but that is complicated and hopefully very rare, so // just re-import the whole block (just like we do for successors) if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0)) { // If we haven't imported this block and we're not going to (because it isn't on // the pending list) then just ignore it for now. 
// This block has either never been imported (EntryState == NULL) or it failed // verification. Neither state requires us to force it to be imported now. assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION)); return; } // For successors we have a valid verCurrentState, so just mark them for reimport // the 'normal' way // Unlike predecessors, we *DO* need to reimport the current block because the // initial import had the wrong entry state types. // Similarly, blocks that are currently on the pending list, still need to call // impImportBlockPending to fixup their entry state. if (predOrSucc == SpillCliqueSucc) { m_pComp->impReimportMarkBlock(blk); // Set the current stack state to that of the blk->bbEntryState m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState); assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry()); m_pComp->impImportBlockPending(blk); } else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0)) { // As described above, we are only visiting predecessors so they can // add the appropriate casts, since we have already done that for the current // block, it does not need to be reimported. // Nor do we need to reimport blocks that are still pending, but not yet // imported. // // For predecessors, we have no state to seed the EntryState, so we just have // to assume the existing one is correct. // If the block is also a successor, it will get the EntryState properly // updated when it is visited as a successor in the above "if" block. assert(predOrSucc == SpillCliquePred); m_pComp->impReimportBlockPending(blk); } } // Re-type the incoming lclVar nodes to match the varDsc. void Compiler::impRetypeEntryStateTemps(BasicBlock* blk) { if (blk->bbEntryState != nullptr) { EntryState* es = blk->bbEntryState; for (unsigned level = 0; level < es->esStackDepth; level++) { GenTree* tree = es->esStack[level].val; if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD)) { es->esStack[level].val->gtType = lvaGetDesc(tree->AsLclVarCommon())->TypeGet(); } } } } unsigned Compiler::impGetSpillTmpBase(BasicBlock* block) { if (block->bbStkTempsOut != NO_BASE_TMP) { return block->bbStkTempsOut; } #ifdef DEBUG if (verbose) { printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // Otherwise, choose one, and propagate to all members of the spill clique. // Grab enough temps for the whole stack. unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries")); SetSpillTempsBase callback(baseTmp); // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor // to one spill clique, and similarly can only be the successor to one spill clique impWalkSpillCliqueFromPred(block, &callback); return baseTmp; } void Compiler::impReimportSpillClique(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // If we get here, it is because this block is already part of a spill clique // and one predecessor had an outgoing live stack slot of type int, and this // block has an outgoing live stack slot of type native int. // We need to reset these before traversal because they have already been set // by the previous walk to determine all the members of the spill clique. 
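    // For reference, impWalkSpillCliqueFromPred (above) finds the clique as a fixed point over
    // two worklists, alternating between the successor and predecessor directions. Informally,
    // the walk amounts to something like:
    //
    //     predToDo = { block };
    //     while (anything was added)
    //     {
    //         for each blk popped from predToDo:
    //             for each successor succ not yet in SpillCliqueSucc:
    //                 callback->Visit(SpillCliqueSucc, succ); add succ to succToDo;
    //         for each blk popped from succToDo:
    //             for each (cheap) predecessor pred not yet in SpillCliquePred:
    //                 callback->Visit(SpillCliquePred, pred); add pred to predToDo;
    //     }
    //
    // Here the ReimportSpillClique callback re-queues each member for (re)import so the widened
    // spill temp types are picked up consistently across the whole clique.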
impInlineRoot()->impSpillCliquePredMembers.Reset(); impInlineRoot()->impSpillCliqueSuccMembers.Reset(); ReimportSpillClique callback(this); impWalkSpillCliqueFromPred(block, &callback); } // Set the pre-state of "block" (which should not have a pre-state allocated) to // a copy of "srcState", cloning tree pointers as required. void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState) { if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom) { block->bbEntryState = nullptr; return; } block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1); // block->bbEntryState.esRefcount = 1; block->bbEntryState->esStackDepth = srcState->esStackDepth; block->bbEntryState->thisInitialized = TIS_Bottom; if (srcState->esStackDepth > 0) { block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]); unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry); memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize); for (unsigned level = 0; level < srcState->esStackDepth; level++) { GenTree* tree = srcState->esStack[level].val; block->bbEntryState->esStack[level].val = gtCloneExpr(tree); } } if (verTrackObjCtorInitState) { verSetThisInit(block, srcState->thisInitialized); } return; } void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis) { assert(tis != TIS_Bottom); // Precondition. if (block->bbEntryState == nullptr) { block->bbEntryState = new (this, CMK_Unknown) EntryState(); } block->bbEntryState->thisInitialized = tis; } /* * Resets the current state to the state at the start of the basic block */ void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState) { if (block->bbEntryState == nullptr) { destState->esStackDepth = 0; destState->thisInitialized = TIS_Bottom; return; } destState->esStackDepth = block->bbEntryState->esStackDepth; if (destState->esStackDepth > 0) { unsigned stackSize = destState->esStackDepth * sizeof(StackEntry); memcpy(destState->esStack, block->bbStackOnEntry(), stackSize); } destState->thisInitialized = block->bbThisOnEntry(); return; } ThisInitState BasicBlock::bbThisOnEntry() const { return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom; } unsigned BasicBlock::bbStackDepthOnEntry() const { return (bbEntryState ? 
bbEntryState->esStackDepth : 0); } void BasicBlock::bbSetStack(void* stackBuffer) { assert(bbEntryState); assert(stackBuffer); bbEntryState->esStack = (StackEntry*)stackBuffer; } StackEntry* BasicBlock::bbStackOnEntry() const { assert(bbEntryState); return bbEntryState->esStack; } void Compiler::verInitCurrentState() { verTrackObjCtorInitState = false; verCurrentState.thisInitialized = TIS_Bottom; // initialize stack info verCurrentState.esStackDepth = 0; assert(verCurrentState.esStack != nullptr); // copy current state to entry state of first BB verInitBBEntryState(fgFirstBB, &verCurrentState); } Compiler* Compiler::impInlineRoot() { if (impInlineInfo == nullptr) { return this; } else { return impInlineInfo->InlineRoot; } } BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliquePred) { return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd()); } else { assert(predOrSucc == SpillCliqueSucc); return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd()); } } void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val) { if (predOrSucc == SpillCliquePred) { impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val); } else { assert(predOrSucc == SpillCliqueSucc); impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val); } } /***************************************************************************** * * Convert the instrs ("import") into our internal format (trees). The * basic flowgraph has already been constructed and is passed in. */ void Compiler::impImport() { #ifdef DEBUG if (verbose) { printf("*************** In impImport() for %s\n", info.compFullName); } #endif Compiler* inlineRoot = impInlineRoot(); if (info.compMaxStack <= SMALL_STACK_SIZE) { impStkSize = SMALL_STACK_SIZE; } else { impStkSize = info.compMaxStack; } if (this == inlineRoot) { // Allocate the stack contents verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } else { // This is the inlinee compiler, steal the stack from the inliner compiler // (after ensuring that it is large enough). if (inlineRoot->impStkSize < impStkSize) { inlineRoot->impStkSize = impStkSize; inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } verCurrentState.esStack = inlineRoot->verCurrentState.esStack; } // initialize the entry state at start of method verInitCurrentState(); // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase). if (this == inlineRoot) // These are only used on the root of the inlining tree. { // We have initialized these previously, but to size 0. Make them larger. impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2); } inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2); impBlockListNodeFreeList = nullptr; #ifdef DEBUG impLastILoffsStmt = nullptr; impNestedStackSpill = false; #endif impBoxTemp = BAD_VAR_NUM; impPendingList = impPendingFree = nullptr; // Skip leading internal blocks. // These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects. 
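    // The walk below follows straight-line internal blocks via bbNext; for an OSR compilation
    // the leading internal block instead ends in a BBJ_ALWAYS jump to the mid-method OSR entry
    // point, so the walk follows bbJumpDest to reach the block where importation should begin.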
// BasicBlock* entryBlock = fgFirstBB; while (entryBlock->bbFlags & BBF_INTERNAL) { JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; if (entryBlock->bbJumpKind == BBJ_NONE) { entryBlock = entryBlock->bbNext; } else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS)) { entryBlock = entryBlock->bbJumpDest; } else { assert(!"unexpected bbJumpKind in entry sequence"); } } // Note for OSR we'd like to be able to verify this block must be // stack empty, but won't know that until we've imported...so instead // we'll BADCODE out if we mess up. // // (the concern here is that the runtime asks us to OSR a // different IL version than the one that matched the method that // triggered OSR). This should not happen but I might have the // IL versioning stuff wrong. // // TODO: we also currently expect this block to be a join point, // which we should verify over when we find jump targets. impImportBlockPending(entryBlock); /* Import blocks in the worker-list until there are no more */ while (impPendingList) { /* Remove the entry at the front of the list */ PendingDsc* dsc = impPendingList; impPendingList = impPendingList->pdNext; impSetPendingBlockMember(dsc->pdBB, 0); /* Restore the stack state */ verCurrentState.thisInitialized = dsc->pdThisPtrInit; verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth; if (verCurrentState.esStackDepth) { impRestoreStackState(&dsc->pdSavedStack); } /* Add the entry to the free list for reuse */ dsc->pdNext = impPendingFree; impPendingFree = dsc; /* Now import the block */ if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION) { verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true)); impEndTreeList(dsc->pdBB); } else { impImportBlock(dsc->pdBB); if (compDonotInline()) { return; } if (compIsForImportOnly()) { return; } } } #ifdef DEBUG if (verbose && info.compXcptnsCount) { printf("\nAfter impImport() added block for try,catch,finally"); fgDispBasicBlocks(); printf("\n"); } // Used in impImportBlockPending() for STRESS_CHK_REIMPORT for (BasicBlock* const block : Blocks()) { block->bbFlags &= ~BBF_VISITED; } #endif } // Checks if a typeinfo (usually stored in the type stack) is a struct. // The invariant here is that if it's not a ref or a method and has a class handle // it's a valuetype bool Compiler::impIsValueType(typeInfo* pTypeInfo) { if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd()) { return true; } else { return false; } } /***************************************************************************** * Check to see if the tree is the address of a local or the address of a field in a local. *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns true. */ bool Compiler::impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut) { if (tree->gtOper != GT_ADDR) { return false; } GenTree* op = tree->AsOp()->gtOp1; while (op->gtOper == GT_FIELD) { op = op->AsField()->GetFldObj(); if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL. 
{ op = op->AsOp()->gtOp1; } else { return false; } } if (op->gtOper == GT_LCL_VAR) { if (lclVarTreeOut != nullptr) { *lclVarTreeOut = op; } return true; } else { return false; } } //------------------------------------------------------------------------ // impMakeDiscretionaryInlineObservations: make observations that help // determine the profitability of a discretionary inline // // Arguments: // pInlineInfo -- InlineInfo for the inline, or null for the prejit root // inlineResult -- InlineResult accumulating information about this inline // // Notes: // If inlining or prejitting the root, this method also makes // various observations about the method that factor into inline // decisions. It sets `compNativeSizeEstimate` as a side effect. void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult) { assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining. (pInlineInfo == nullptr && !compIsForInlining()) // Calculate the static inlining hint for ngen. ); // If we're really inlining, we should just have one result in play. assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult)); // If this is a "forceinline" method, the JIT probably shouldn't have gone // to the trouble of estimating the native code size. Even if it did, it // shouldn't be relying on the result of this method. assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE); // Note if the caller contains NEWOBJ or NEWARR. Compiler* rootCompiler = impInlineRoot(); if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY); } if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ); } bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0; if (isSpecialMethod) { if (calleeIsStatic) { inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR); } else { inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR); } } else if (!calleeIsStatic) { // Callee is an instance method. // // Check if the callee has the same 'this' as the root. if (pInlineInfo != nullptr) { GenTree* thisArg = pInlineInfo->iciCall->AsCall()->gtCallThisArg->GetNode(); assert(thisArg); bool isSameThis = impIsThis(thisArg); inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis); } } bool callsiteIsGeneric = (rootCompiler->info.compMethodInfo->args.sigInst.methInstCount != 0) || (rootCompiler->info.compMethodInfo->args.sigInst.classInstCount != 0); bool calleeIsGeneric = (info.compMethodInfo->args.sigInst.methInstCount != 0) || (info.compMethodInfo->args.sigInst.classInstCount != 0); if (!callsiteIsGeneric && calleeIsGeneric) { inlineResult->Note(InlineObservation::CALLSITE_NONGENERIC_CALLS_GENERIC); } // Inspect callee's arguments (and the actual values at the callsite for them) CORINFO_SIG_INFO sig = info.compMethodInfo->args; CORINFO_ARG_LIST_HANDLE sigArg = sig.args; GenTreeCall::Use* argUse = pInlineInfo == nullptr ? nullptr : pInlineInfo->iciCall->AsCall()->gtCallArgs; for (unsigned i = 0; i < info.compMethodInfo->args.numArgs; i++) { CORINFO_CLASS_HANDLE sigClass; CorInfoType corType = strip(info.compCompHnd->getArgType(&sig, sigArg, &sigClass)); GenTree* argNode = argUse == nullptr ? 
nullptr : argUse->GetNode()->gtSkipPutArgType(); if (corType == CORINFO_TYPE_CLASS) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); } else if (corType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT); } else if (corType == CORINFO_TYPE_BYREF) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); corType = info.compCompHnd->getChildType(sigClass, &sigClass); } if (argNode != nullptr) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE argCls = gtGetClassHandle(argNode, &isExact, &isNonNull); if (argCls != nullptr) { const bool isArgValueType = eeIsValueClass(argCls); // Exact class of the arg is known if (isExact && !isArgValueType) { inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS); if ((argCls != sigClass) && (sigClass != nullptr)) { // .. but the signature accepts a less concrete type. inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS_SIG_IS_NOT); } } // Arg is a reference type in the signature and a boxed value type was passed. else if (isArgValueType && (corType == CORINFO_TYPE_CLASS)) { inlineResult->Note(InlineObservation::CALLSITE_ARG_BOXED); } } if (argNode->OperIsConst()) { inlineResult->Note(InlineObservation::CALLSITE_ARG_CONST); } argUse = argUse->GetNext(); } sigArg = info.compCompHnd->getArgNext(sigArg); } // Note if the callee's return type is a value type if (info.compMethodInfo->args.retType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_RETURNS_STRUCT); } // Note if the callee's class is a promotable struct if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0) { assert(structPromotionHelper != nullptr); if (structPromotionHelper->CanPromoteStructType(info.compClassHnd)) { inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE); } inlineResult->Note(InlineObservation::CALLEE_CLASS_VALUETYPE); } #ifdef FEATURE_SIMD // Note if this method is has SIMD args or return value if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn) { inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD); } #endif // FEATURE_SIMD // Roughly classify callsite frequency. InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED; // If this is a prejit root, or a maximally hot block... if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->isMaxBBWeight())) { frequency = InlineCallsiteFrequency::HOT; } // No training data. Look for loop-like things. // We consider a recursive call loop-like. Do not give the inlining boost to the method itself. // However, give it to things nearby. else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) && (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle)) { frequency = InlineCallsiteFrequency::LOOP; } else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT)) { frequency = InlineCallsiteFrequency::WARM; } // Now modify the multiplier based on where we're called from. else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) { frequency = InlineCallsiteFrequency::RARE; } else { frequency = InlineCallsiteFrequency::BORING; } // Also capture the block weight of the call site. // // In the prejit root case, assume at runtime there might be a hot call site // for this method, so we won't prematurely conclude this method should never // be inlined. 
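    // For reference, the frequency classification above roughly boils down to:
    //
    //     HOT    - prejit root, or a maximally hot call block
    //     LOOP   - call block has a backward jump and the call is not directly recursive
    //     WARM   - call block has a nonzero profile weight
    //     RARE   - call block is run rarely, or the method is a class constructor (FLG_CCTOR)
    //     BORING - everything else
    //
    // and the prejit-root case below substitutes a large synthetic caller weight so the
    // candidate is not penalized for the missing call site.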
// weight_t weight = 0; if (pInlineInfo != nullptr) { weight = pInlineInfo->iciBlock->bbWeight; } else { const weight_t prejitHotCallerWeight = 1000000.0; weight = prejitHotCallerWeight; } inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency)); inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, (int)(weight)); bool hasProfile = false; double profileFreq = 0.0; // If the call site has profile data, report the relative frequency of the site. // if ((pInlineInfo != nullptr) && rootCompiler->fgHaveSufficientProfileData()) { const weight_t callSiteWeight = pInlineInfo->iciBlock->bbWeight; const weight_t entryWeight = rootCompiler->fgFirstBB->bbWeight; profileFreq = fgProfileWeightsEqual(entryWeight, 0.0) ? 0.0 : callSiteWeight / entryWeight; hasProfile = true; assert(callSiteWeight >= 0); assert(entryWeight >= 0); } else if (pInlineInfo == nullptr) { // Simulate a hot callsite for PrejitRoot mode. hasProfile = true; profileFreq = 1.0; } inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, hasProfile); inlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, profileFreq); } /***************************************************************************** This method makes STATIC inlining decision based on the IL code. It should not make any inlining decision based on the context. If forceInline is true, then the inlining decision should not depend on performance heuristics (code size, etc.). */ void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult) { unsigned codeSize = methInfo->ILCodeSize; // We shouldn't have made up our minds yet... assert(!inlineResult->IsDecided()); if (methInfo->EHcount) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH); return; } if ((methInfo->ILCode == nullptr) || (codeSize == 0)) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // For now we don't inline varargs (import code can't handle it) if (methInfo->args.isVarArg()) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return; } // Reject if it has too many locals. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs); if (methInfo->locals.numArgs > MAX_INL_LCLS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS); return; } // Make sure there aren't too many arguments. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. 
inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs); if (methInfo->args.numArgs > MAX_INL_ARGS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS); return; } // Note force inline state inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline); // Note IL code size inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize); if (inlineResult->IsFailure()) { return; } // Make sure maxstack is not too big inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack); if (inlineResult->IsFailure()) { return; } } /***************************************************************************** */ void Compiler::impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult) { // Either EE or JIT might throw exceptions below. // If that happens, just don't inline the method. struct Param { Compiler* pThis; GenTreeCall* call; CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; CORINFO_CONTEXT_HANDLE exactContextHnd; InlineResult* result; InlineCandidateInfo** ppInlineCandidateInfo; } param; memset(&param, 0, sizeof(param)); param.pThis = this; param.call = call; param.fncHandle = fncHandle; param.methAttr = methAttr; param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle); param.result = inlineResult; param.ppInlineCandidateInfo = ppInlineCandidateInfo; bool success = eeRunWithErrorTrap<Param>( [](Param* pParam) { CorInfoInitClassResult initClassResult; #ifdef DEBUG const char* methodName; const char* className; methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className); if (JitConfig.JitNoInline()) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE); goto _exit; } #endif /* Try to get the code address/size for the method */ CORINFO_METHOD_INFO methInfo; if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo)) { pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO); goto _exit; } // Profile data allows us to avoid early "too many IL bytes" outs. pParam->result->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, pParam->pThis->fgHaveSufficientProfileData()); bool forceInline; forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE); pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result); if (pParam->result->IsFailure()) { assert(pParam->result->IsNever()); goto _exit; } // Speculatively check if initClass() can be done. // If it can be done, we will try to inline the method. initClassResult = pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */, pParam->exactContextHnd /* context */); if (initClassResult & CORINFO_INITCLASS_DONT_INLINE) { pParam->result->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT); goto _exit; } // Given the EE the final say in whether to inline or not. 
// This should be last since for verifiable code, this can be expensive /* VM Inline check also ensures that the method is verifiable if needed */ CorInfoInline vmResult; vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle); if (vmResult == INLINE_FAIL) { pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE); } else if (vmResult == INLINE_NEVER) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE); } if (pParam->result->IsFailure()) { // Make sure not to report this one. It was already reported by the VM. pParam->result->SetReported(); goto _exit; } /* Get the method properties */ CORINFO_CLASS_HANDLE clsHandle; clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle); unsigned clsAttr; clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle); /* Get the return type */ var_types fncRetType; fncRetType = pParam->call->TypeGet(); #ifdef DEBUG var_types fncRealRetType; fncRealRetType = JITtype2varType(methInfo.args.retType); assert((genActualType(fncRealRetType) == genActualType(fncRetType)) || // <BUGNUM> VSW 288602 </BUGNUM> // In case of IJW, we allow to assign a native pointer to a BYREF. (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) || (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT))); #endif // Allocate an InlineCandidateInfo structure, // // Or, reuse the existing GuardedDevirtualizationCandidateInfo, // which was pre-allocated to have extra room. // InlineCandidateInfo* pInfo; if (pParam->call->IsGuardedDevirtualizationCandidate()) { pInfo = pParam->call->gtInlineCandidateInfo; } else { pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo; // Null out bits we don't use when we're just inlining pInfo->guardedClassHandle = nullptr; pInfo->guardedMethodHandle = nullptr; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->likelihood = 0; pInfo->requiresInstMethodTableArg = false; } pInfo->methInfo = methInfo; pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd; pInfo->clsHandle = clsHandle; pInfo->exactContextHnd = pParam->exactContextHnd; pInfo->retExpr = nullptr; pInfo->preexistingSpillTemp = BAD_VAR_NUM; pInfo->clsAttr = clsAttr; pInfo->methAttr = pParam->methAttr; pInfo->initClassResult = initClassResult; pInfo->fncRetType = fncRetType; pInfo->exactContextNeedsRuntimeLookup = false; pInfo->inlinersContext = pParam->pThis->compInlineContext; // Note exactContextNeedsRuntimeLookup is reset later on, // over in impMarkInlineCandidate. *(pParam->ppInlineCandidateInfo) = pInfo; _exit:; }, &param); if (!success) { param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); } } //------------------------------------------------------------------------ // impInlineRecordArgInfo: record information about an inline candidate argument // // Arguments: // pInlineInfo - inline info for the inline candidate // curArgVal - tree for the caller actual argument value // argNum - logical index of this argument // inlineResult - result of ongoing inline evaluation // // Notes: // // Checks for various inline blocking conditions and makes notes in // the inline info arg table about the properties of the actual. These // properties are used later by impInlineFetchArg to determine how best to // pass the argument into the inlinee. 
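// In practice the properties recorded below include (roughly): whether the actual is a
// constant or other invariant value, a plain local var, the address of a struct local,
// whether it has global references or other side effects, whether its exact class is
// known, and whether the tree contains address-taken caller locals that block direct
// substitution. A null constant passed for 'this' aborts the inline outright.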
void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult) { InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum]; inlCurArgInfo->argNode = curArgVal; // Save the original tree, with PUT_ARG and RET_EXPR. curArgVal = curArgVal->gtSkipPutArgType(); curArgVal = curArgVal->gtRetExprVal(); if (curArgVal->gtOper == GT_MKREFANY) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY); return; } GenTree* lclVarTree; const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree); if (isAddressInLocal && varTypeIsStruct(lclVarTree)) { inlCurArgInfo->argIsByRefToStructLocal = true; #ifdef FEATURE_SIMD if (lvaTable[lclVarTree->AsLclVarCommon()->GetLclNum()].lvSIMDType) { pInlineInfo->hasSIMDTypeArgLocalOrReturn = true; } #endif // FEATURE_SIMD } if (curArgVal->gtFlags & GTF_ALL_EFFECT) { inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0; inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0; } if (curArgVal->gtOper == GT_LCL_VAR) { inlCurArgInfo->argIsLclVar = true; /* Remember the "original" argument number */ INDEBUG(curArgVal->AsLclVar()->gtLclILoffs = argNum;) } if (curArgVal->IsInvariant()) { inlCurArgInfo->argIsInvariant = true; if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->AsIntCon()->gtIconVal == 0)) { // Abort inlining at this call site inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS); return; } } bool isExact = false; bool isNonNull = false; inlCurArgInfo->argIsExact = (gtGetClassHandle(curArgVal, &isExact, &isNonNull) != NO_CLASS_HANDLE) && isExact; // If the arg is a local that is address-taken, we can't safely // directly substitute it into the inlinee. // // Previously we'd accomplish this by setting "argHasLdargaOp" but // that has a stronger meaning: that the arg value can change in // the method body. Using that flag prevents type propagation, // which is safe in this case. // // Instead mark the arg as having a caller local ref. if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal)) { inlCurArgInfo->argHasCallerLocalRef = true; } #ifdef DEBUG if (verbose) { if (inlCurArgInfo->argIsThis) { printf("thisArg:"); } else { printf("\nArgument #%u:", argNum); } if (inlCurArgInfo->argIsLclVar) { printf(" is a local var"); } if (inlCurArgInfo->argIsInvariant) { printf(" is a constant"); } if (inlCurArgInfo->argHasGlobRef) { printf(" has global refs"); } if (inlCurArgInfo->argHasCallerLocalRef) { printf(" has caller local ref"); } if (inlCurArgInfo->argHasSideEff) { printf(" has side effects"); } if (inlCurArgInfo->argHasLdargaOp) { printf(" has ldarga effect"); } if (inlCurArgInfo->argHasStargOp) { printf(" has starg effect"); } if (inlCurArgInfo->argIsByRefToStructLocal) { printf(" is byref to a struct local"); } printf("\n"); gtDispTree(curArgVal); printf("\n"); } #endif } //------------------------------------------------------------------------ // impInlineInitVars: setup inline information for inlinee args and locals // // Arguments: // pInlineInfo - inline info for the inline candidate // // Notes: // This method primarily adds caller-supplied info to the inlArgInfo // and sets up the lclVarInfo table. // // For args, the inlArgInfo records properties of the actual argument // including the tree node that produces the arg value. 
This node is // usually the tree node present at the call, but may also differ in // various ways: // - when the call arg is a GT_RET_EXPR, we search back through the ret // expr chain for the actual node. Note this will either be the original // call (which will be a failed inline by this point), or the return // expression from some set of inlines. // - when argument type casting is needed the necessary casts are added // around the argument node. // - if an argument can be simplified by folding then the node here is the // folded value. // // The method may make observations that lead to marking this candidate as // a failed inline. If this happens the initialization is abandoned immediately // to try and reduce the jit time cost for a failed inline. void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) { assert(!compIsForInlining()); GenTreeCall* call = pInlineInfo->iciCall; CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo; unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr; InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo; InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo; InlineResult* inlineResult = pInlineInfo->inlineResult; // Inlined methods always use the managed calling convention const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo, CorInfoCallConvExtension::Managed); /* init the argument stuct */ memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0])); GenTreeCall::Use* thisArg = call->gtCallThisArg; unsigned argCnt = 0; // Count of the arguments assert((methInfo->args.hasThis()) == (thisArg != nullptr)); if (thisArg != nullptr) { inlArgInfo[0].argIsThis = true; impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Record some information about each of the arguments */ bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0; #if USER_ARGS_COME_LAST unsigned typeCtxtArg = (thisArg != nullptr) ? 1 : 0; #else // USER_ARGS_COME_LAST unsigned typeCtxtArg = methInfo->args.totalILArgs(); #endif // USER_ARGS_COME_LAST for (GenTreeCall::Use& use : call->Args()) { if (hasRetBuffArg && (&use == call->gtCallArgs)) { continue; } // Ignore the type context argument if (hasTypeCtxtArg && (argCnt == typeCtxtArg)) { pInlineInfo->typeContextArg = typeCtxtArg; typeCtxtArg = 0xFFFFFFFF; continue; } GenTree* actualArg = gtFoldExpr(use.GetNode()); impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Make sure we got the arg number right */ assert(argCnt == methInfo->args.totalILArgs()); #ifdef FEATURE_SIMD bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn; #endif // FEATURE_SIMD /* We have typeless opcodes, get type information from the signature */ if (thisArg != nullptr) { lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle); lclVarInfo[0].lclHasLdlocaOp = false; #ifdef FEATURE_SIMD // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase // the inlining multiplier) for anything in that assembly. // But we only need to normalize it if it is a TYP_STRUCT // (which we need to do even if we have already set foundSIMDType). 
if (!foundSIMDType && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo))) { foundSIMDType = true; } #endif // FEATURE_SIMD var_types sigType = ((clsAttr & CORINFO_FLG_VALUECLASS) != 0) ? TYP_BYREF : TYP_REF; lclVarInfo[0].lclTypeInfo = sigType; GenTree* thisArgNode = thisArg->GetNode(); assert(varTypeIsGC(thisArgNode->TypeGet()) || // "this" is managed ((thisArgNode->TypeGet() == TYP_I_IMPL) && // "this" is unmgd but the method's class doesnt care (clsAttr & CORINFO_FLG_VALUECLASS))); if (genActualType(thisArgNode->TypeGet()) != genActualType(sigType)) { if (sigType == TYP_REF) { /* The argument cannot be bashed into a ref (see bug 750871) */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF); return; } /* This can only happen with byrefs <-> ints/shorts */ assert(sigType == TYP_BYREF); assert((genActualType(thisArgNode->TypeGet()) == TYP_I_IMPL) || (thisArgNode->TypeGet() == TYP_BYREF)); lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } } /* Init the types of the arguments and make sure the types * from the trees match the types in the signature */ CORINFO_ARG_LIST_HANDLE argLst; argLst = methInfo->args.args; unsigned i; for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst)) { var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args); lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst); #ifdef FEATURE_SIMD if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo))) { // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've // found a SIMD type, even if this may not be a type we recognize (the assumption is that // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier). foundSIMDType = true; if (sigType == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle()); sigType = structType; } } #endif // FEATURE_SIMD lclVarInfo[i].lclTypeInfo = sigType; lclVarInfo[i].lclHasLdlocaOp = false; /* Does the tree type match the signature type? */ GenTree* inlArgNode = inlArgInfo[i].argNode; if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE)) { assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType)); assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType)); /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints, but in bad IL cases with caller-callee signature mismatches we can see other types. Intentionally reject cases with mismatches so the jit is more flexible when encountering bad IL. */ bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) || (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) || (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType)); if (!isPlausibleTypeMatch) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE); return; } GenTree** pInlArgNode; if (inlArgNode->OperIs(GT_PUTARG_TYPE)) { // There was a widening or narrowing cast. GenTreeUnOp* putArgType = inlArgNode->AsUnOp(); pInlArgNode = &putArgType->gtOp1; inlArgNode = putArgType->gtOp1; } else { // The same size but different type of the arguments. pInlArgNode = &inlArgInfo[i].argNode; } /* Is it a narrowing or widening cast? 
* Widening casts are ok since the value computed is already * normalized to an int (on the IL stack) */ if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType)) { if (sigType == TYP_BYREF) { lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else if (inlArgNode->gtType == TYP_BYREF) { assert(varTypeIsIntOrI(sigType)); /* If possible bash the BYREF to an int */ if (inlArgNode->IsLocalAddrExpr() != nullptr) { inlArgNode->gtType = TYP_I_IMPL; lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else { /* Arguments 'int <- byref' cannot be changed */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT); return; } } else if (genTypeSize(sigType) < TARGET_POINTER_SIZE) { // Narrowing cast. if (inlArgNode->OperIs(GT_LCL_VAR)) { const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum(); if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum)) { // We don't need to insert a cast here as the variable // was assigned a normalized value of the right type. continue; } } inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; // Try to fold the node in case we have constant arguments. if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #ifdef TARGET_64BIT else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType)) { // This should only happen for int -> native int widening inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; /* Try to fold the node in case we have constant arguments */ if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #endif // TARGET_64BIT } } } /* Init the types of the local variables */ CORINFO_ARG_LIST_HANDLE localsSig; localsSig = methInfo->locals.args; for (i = 0; i < methInfo->locals.numArgs; i++) { bool isPinned; var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned); lclVarInfo[i + argCnt].lclHasLdlocaOp = false; lclVarInfo[i + argCnt].lclTypeInfo = type; if (varTypeIsGC(type)) { if (isPinned) { JITDUMP("Inlinee local #%02u is pinned\n", i); lclVarInfo[i + argCnt].lclIsPinned = true; // Pinned locals may cause inlines to fail. inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS); if (inlineResult->IsFailure()) { return; } } pInlineInfo->numberOfGcRefLocals++; } else if (isPinned) { JITDUMP("Ignoring pin on inlinee local #%02u -- not a GC type\n", i); } lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig); // If this local is a struct type with GC fields, inform the inliner. It may choose to bail // out on the inline. if (type == TYP_STRUCT) { CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle(); DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (inlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; some policies do // not track the relative hotness of call sites for "always" inline cases. 
if (pInlineInfo->iciBlock->isRunRarely()) { inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (inlineResult->IsFailure()) { return; } } } } localsSig = info.compCompHnd->getArgNext(localsSig); #ifdef FEATURE_SIMD if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo))) { foundSIMDType = true; if (supportSIMDTypes() && type == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle()); lclVarInfo[i + argCnt].lclTypeInfo = structType; } } #endif // FEATURE_SIMD } #ifdef FEATURE_SIMD if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd)) { foundSIMDType = true; } pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType; #endif // FEATURE_SIMD } //------------------------------------------------------------------------ // impInlineFetchLocal: get a local var that represents an inlinee local // // Arguments: // lclNum -- number of the inlinee local // reason -- debug string describing purpose of the local var // // Returns: // Number of the local to use // // Notes: // This method is invoked only for locals actually used in the // inlinee body. // // Allocates a new temp if necessary, and copies key properties // over from the inlinee local var info. unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)) { assert(compIsForInlining()); unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum]; if (tmpNum == BAD_VAR_NUM) { const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt]; const var_types lclTyp = inlineeLocal.lclTypeInfo; // The lifetime of this local might span multiple BBs. // So it is a long lifetime local. impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason)); // Copy over key info lvaTable[tmpNum].lvType = lclTyp; lvaTable[tmpNum].lvHasLdAddrOp = inlineeLocal.lclHasLdlocaOp; lvaTable[tmpNum].lvPinned = inlineeLocal.lclIsPinned; lvaTable[tmpNum].lvHasILStoreOp = inlineeLocal.lclHasStlocOp; lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp; // Copy over class handle for ref types. Note this may be a // shared type -- someday perhaps we can get the exact // signature and pass in a more precise type. if (lclTyp == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp; if (lvaTable[tmpNum].lvSingleDef) { JITDUMP("Marked V%02u as a single def temp\n", tmpNum); } lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef()); } if (inlineeLocal.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); } else { // This is a wrapped primitive. Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo; } } #ifdef DEBUG // Sanity check that we're properly prepared for gc ref locals. if (varTypeIsGC(lclTyp)) { // Since there are gc locals we should have seen them earlier // and if there was a return value, set up the spill temp. assert(impInlineInfo->HasGcRefLocals()); assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp()); } else { // Make sure all pinned locals count as gc refs. 
assert(!inlineeLocal.lclIsPinned); } #endif // DEBUG } return tmpNum; } //------------------------------------------------------------------------ // impInlineFetchArg: return tree node for argument value in an inlinee // // Arguments: // lclNum -- argument number in inlinee IL // inlArgInfo -- argument info for inlinee // lclVarInfo -- var info for inlinee // // Returns: // Tree for the argument's value. Often an inlinee-scoped temp // GT_LCL_VAR but can be other tree kinds, if the argument // expression from the caller can be directly substituted into the // inlinee body. // // Notes: // Must be used only for arguments -- use impInlineFetchLocal for // inlinee locals. // // Direct substitution is performed when the formal argument cannot // change value in the inlinee body (no starg or ldarga), and the // actual argument expression's value cannot be changed if it is // substituted it into the inlinee body. // // Even if an inlinee-scoped temp is returned here, it may later be // "bashed" to a caller-supplied tree when arguments are actually // passed (see fgInlinePrependStatements). Bashing can happen if // the argument ends up being single use and other conditions are // met. So the contents of the tree returned here may not end up // being the ones ultimately used for the argument. // // This method will side effect inlArgInfo. It should only be called // for actual uses of the argument in the inlinee. GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo) { // Cache the relevant arg and lcl info for this argument. // We will modify argInfo but not lclVarInfo. InlArgInfo& argInfo = inlArgInfo[lclNum]; const InlLclVarInfo& lclInfo = lclVarInfo[lclNum]; const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp; const var_types lclTyp = lclInfo.lclTypeInfo; GenTree* op1 = nullptr; GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal(); if (argInfo.argIsInvariant && !argCanBeModified) { // Directly substitute constants or addresses of locals // // Clone the constant. Note that we cannot directly use // argNode in the trees even if !argInfo.argIsUsed as this // would introduce aliasing between inlArgInfo[].argNode and // impInlineExpr. Then gtFoldExpr() could change it, causing // further references to the argument working off of the // bashed copy. op1 = gtCloneExpr(argNode); PREFIX_ASSUME(op1 != nullptr); argInfo.argTmpNum = BAD_VAR_NUM; // We may need to retype to ensure we match the callee's view of the type. // Otherwise callee-pass throughs of arguments can create return type // mismatches that block inlining. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. if (op1->TypeGet() != lclTyp) { op1->gtType = genActualType(lclTyp); } } else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef) { // Directly substitute unaliased caller locals for args that cannot be modified // // Use the caller-supplied node if this is the first use. op1 = argNode; unsigned argLclNum = op1->AsLclVarCommon()->GetLclNum(); argInfo.argTmpNum = argLclNum; // Use an equivalent copy if this is the second or subsequent // use. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. If inlining is not prevented // but a cast is necessary, we similarly expect it to have been inserted then. // So here we may have argument type mismatches that are benign, for instance // passing a TYP_SHORT local (eg. 
normalized-on-load) as a TYP_INT arg. // The exception is when the inlining means we should start tracking the argument. if (argInfo.argIsUsed || ((lclTyp == TYP_BYREF) && (op1->TypeGet() != TYP_BYREF))) { assert(op1->gtOper == GT_LCL_VAR); assert(lclNum == op1->AsLclVar()->gtLclILoffs); // Create a new lcl var node - remember the argument lclNum op1 = impCreateLocalNode(argLclNum DEBUGARG(op1->AsLclVar()->gtLclILoffs)); // Start tracking things as a byref if the parameter is a byref. if (lclTyp == TYP_BYREF) { op1->gtType = TYP_BYREF; } } } else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp) { /* Argument is a by-ref address to a struct, a normed struct, or its field. In these cases, don't spill the byref to a local, simply clone the tree and use it. This way we will increase the chance for this byref to be optimized away by a subsequent "dereference" operation. From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal. For example, if the caller is: ldloca.s V_1 // V_1 is a local struct call void Test.ILPart::RunLdargaOnPointerArg(int32*) and the callee being inlined has: .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed ldarga.s ptrToInts call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**) then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR. */ assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL); op1 = gtCloneExpr(argNode); } else { /* Argument is a complex expression - it must be evaluated into a temp */ if (argInfo.argHasTmp) { assert(argInfo.argIsUsed); assert(argInfo.argTmpNum < lvaCount); /* Create a new lcl var node - remember the argument lclNum */ op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp)); /* This is the second or later use of the this argument, so we have to use the temp (instead of the actual arg) */ argInfo.argBashTmpNode = nullptr; } else { /* First time use */ assert(!argInfo.argIsUsed); /* Reserve a temp for the expression. * Use a large size node as we may change it later */ const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg")); lvaTable[tmpNum].lvType = lclTyp; // For ref types, determine the type of the temp. if (lclTyp == TYP_REF) { if (!argCanBeModified) { // If the arg can't be modified in the method // body, use the type of the value, if // known. Otherwise, use the declared type. assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmpNum); lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } else { // Arg might be modified, use the declared type of // the argument. lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } } assert(!lvaTable[tmpNum].IsAddressExposed()); if (argInfo.argHasLdargaOp) { lvaTable[tmpNum].lvHasLdAddrOp = 1; } if (lclInfo.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(tmpNum); } } else { // This is a wrapped primitive. 
Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo; } } argInfo.argHasTmp = true; argInfo.argTmpNum = tmpNum; // If we require strict exception order, then arguments must // be evaluated in sequence before the body of the inlined method. // So we need to evaluate them to a temp. // Also, if arguments have global or local references, we need to // evaluate them to a temp before the inlined body as the // inlined body may be modifying the global ref. // TODO-1stClassStructs: We currently do not reuse an existing lclVar // if it is a struct, because it requires some additional handling. if ((!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef && !argInfo.argHasCallerLocalRef)) { /* Get a *LARGE* LCL_VAR node */ op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp) DEBUGARG(lclNum)); /* Record op1 as the very first use of this argument. If there are no further uses of the arg, we may be able to use the actual arg node instead of the temp. If we do see any further uses, we will clear this. */ argInfo.argBashTmpNode = op1; } else { /* Get a small LCL_VAR node */ op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp)); /* No bashing of this argument */ argInfo.argBashTmpNode = nullptr; } } } // Mark this argument as used. argInfo.argIsUsed = true; return op1; } /****************************************************************************** Is this the original "this" argument to the call being inlined? Note that we do not inline methods with "starg 0", and so we do not need to worry about it. */ bool Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); return (tree->gtOper == GT_LCL_VAR && tree->AsLclVarCommon()->GetLclNum() == inlArgInfo[0].argTmpNum); } //----------------------------------------------------------------------------- // impInlineIsGuaranteedThisDerefBeforeAnySideEffects: Check if a dereference in // the inlinee can guarantee that the "this" pointer is non-NULL. // // Arguments: // additionalTree - a tree to check for side effects // additionalCallArgs - a list of call args to check for side effects // dereferencedAddress - address expression being dereferenced // inlArgInfo - inlinee argument information // // Notes: // If we haven't hit a branch or a side effect, and we are dereferencing // from 'this' to access a field or make GTF_CALL_NULLCHECK call, // then we can avoid a separate null pointer check. // // The importer stack and current statement list are searched for side effects. // Trees that have been popped of the stack but haven't been appended to the // statement list and have to be checked for side effects may be provided via // additionalTree and additionalCallArgs. 
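// Informally, the answer is "yes" only when all of the following hold: the current block is
// still the method's first block, the dereferenced address is the inlinee's 'this' argument,
// and no globally visible side effect has been recorded yet in additionalTree,
// additionalCallArgs, the pending statement list, or the importer's evaluation stack.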
// bool Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); assert(opts.OptEnabled(CLFLG_INLINING)); BasicBlock* block = compCurBB; if (block != fgFirstBB) { return false; } if (!impInlineIsThis(dereferencedAddress, inlArgInfo)) { return false; } if ((additionalTree != nullptr) && GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTree->gtFlags)) { return false; } for (GenTreeCall::Use& use : GenTreeCall::UseList(additionalCallArgs)) { if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(use.GetNode()->gtFlags)) { return false; } } for (Statement* stmt : StatementList(impStmtList)) { GenTree* expr = stmt->GetRootNode(); if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags)) { return false; } } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTreeFlags stackTreeFlags = verCurrentState.esStack[level].val->gtFlags; if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags)) { return false; } } return true; } //------------------------------------------------------------------------ // impMarkInlineCandidate: determine if this call can be subsequently inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // Mostly a wrapper for impMarkInlineCandidateHelper that also undoes // guarded devirtualization for virtual calls where the method we'd // devirtualize to cannot be inlined. void Compiler::impMarkInlineCandidate(GenTree* callNode, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { GenTreeCall* call = callNode->AsCall(); // Do the actual evaluation impMarkInlineCandidateHelper(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); // If this call is an inline candidate or is not a guarded devirtualization // candidate, we're done. if (call->IsInlineCandidate() || !call->IsGuardedDevirtualizationCandidate()) { return; } // If we can't inline the call we'd guardedly devirtualize to, // we undo the guarded devirtualization, as the benefit from // just guarded devirtualization alone is likely not worth the // extra jit time and code size. // // TODO: it is possibly interesting to allow this, but requires // fixes elsewhere too... JITDUMP("Revoking guarded devirtualization candidacy for call [%06u]: target method can't be inlined\n", dspTreeID(call)); call->ClearGuardedDevirtualizationCandidate(); } //------------------------------------------------------------------------ // impMarkInlineCandidateHelper: determine if this call can be subsequently // inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // If callNode is an inline candidate, this method sets the flag // GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have // filled in the associated InlineCandidateInfo. // // If callNode is not an inline candidate, and the reason is // something that is inherent to the method being called, the // method may be marked as "noinline" to short-circuit any // future assessments of calls to this method. 
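// As a rough guide, the checks below reject candidates when the root method is not being
// optimized or has inlining disabled, when the call carries an explicit tail. prefix, is a
// delegate Invoke, a recursive implicit tail call, a virtual call without a guarded
// devirtualization candidate, a helper or indirect call, or when the callee is marked
// noinline, is synchronized, or is a PInvoke at a call site where the marshalling code
// cannot be inlined. Surviving candidates are then evaluated by impCheckCanInline.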
void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { // Let the strategy know there's another call impInlineRoot()->m_inlineStrategy->NoteCall(); if (!opts.OptEnabled(CLFLG_INLINING)) { /* XXX Mon 8/18/2008 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and * figure out why we did not set MAXOPT for this compile. */ assert(!compIsForInlining()); return; } if (compIsForImportOnly()) { // Don't bother creating the inline candidate during verification. // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification // that leads to the creation of multiple instances of Compiler. return; } InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate"); // Don't inline if not optimizing root method if (opts.compDbgCode) { inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN); return; } // Don't inline if inlining into this method is disabled. if (impInlineRoot()->m_inlineStrategy->IsInliningDisabled()) { inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE); return; } // Don't inline into callers that use the NextCallReturnAddress intrinsic. if (info.compHasNextCallRetAddr) { inlineResult.NoteFatal(InlineObservation::CALLER_USES_NEXT_CALL_RET_ADDR); return; } // Inlining candidate determination needs to honor only IL tail prefix. // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive). if (call->IsTailPrefixedCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX); return; } // Delegate Invoke method doesn't have a body and gets special cased instead. // Don't even bother trying to inline it. if (call->IsDelegateInvoke()) { inlineResult.NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // Tail recursion elimination takes precedence over inlining. // TODO: We may want to do some of the additional checks from fgMorphCall // here to reduce the chance we don't inline a call that won't be optimized // as a fast tail call or turned into a loop. if (gtIsRecursiveCall(call) && call->IsImplicitTailCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL); return; } if (call->IsVirtual()) { // Allow guarded devirt calls to be treated as inline candidates, // but reject all other virtual calls. if (!call->IsGuardedDevirtualizationCandidate()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT); return; } } /* Ignore helper calls */ if (call->gtCallType == CT_HELPER) { assert(!call->IsGuardedDevirtualizationCandidate()); inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER); return; } /* Ignore indirect calls */ if (call->gtCallType == CT_INDIRECT) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED); return; } /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding * inlining in throw blocks. I should consider the same thing for catch and filter regions. 
*/ CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; if (call->IsGuardedDevirtualizationCandidate()) { if (call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle != nullptr) { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle; } else { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodHandle; } methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } else { fncHandle = call->gtCallMethHnd; // Reuse method flags from the original callInfo if possible if (fncHandle == callInfo->hMethod) { methAttr = callInfo->methodFlags; } else { methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } } #ifdef DEBUG if (compStressCompile(STRESS_FORCE_INLINE, 0)) { methAttr |= CORINFO_FLG_FORCEINLINE; } #endif // Check for COMPlus_AggressiveInlining if (compDoAggressiveInlining) { methAttr |= CORINFO_FLG_FORCEINLINE; } if (!(methAttr & CORINFO_FLG_FORCEINLINE)) { /* Don't bother inline blocks that are in the filter region */ if (bbInCatchHandlerILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the catch handler region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH); return; } if (bbInFilterILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the filter region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER); return; } } /* Check if we tried to inline this method before */ if (methAttr & CORINFO_FLG_DONT_INLINE) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE); return; } /* Cannot inline synchronized methods */ if (methAttr & CORINFO_FLG_SYNCH) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED); return; } /* Check legality of PInvoke callsite (for inlining of marshalling code) */ if (methAttr & CORINFO_FLG_PINVOKE) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (!impCanPInvokeInlineCallSite(block)) { inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH); return; } } InlineCandidateInfo* inlineCandidateInfo = nullptr; impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult); if (inlineResult.IsFailure()) { return; } // The old value should be null OR this call should be a guarded devirtualization candidate. assert((call->gtInlineCandidateInfo == nullptr) || call->IsGuardedDevirtualizationCandidate()); // The new value should not be null. assert(inlineCandidateInfo != nullptr); inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup; call->gtInlineCandidateInfo = inlineCandidateInfo; // If we're in an inlinee compiler, and have a return spill temp, and this inline candidate // is also a tail call candidate, it can use the same return spill temp. // if (compIsForInlining() && call->CanTailCall() && (impInlineInfo->inlineCandidateInfo->preexistingSpillTemp != BAD_VAR_NUM)) { inlineCandidateInfo->preexistingSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp; JITDUMP("Inline candidate [%06u] can share spill temp V%02u\n", dspTreeID(call), inlineCandidateInfo->preexistingSpillTemp); } // Mark the call node as inline candidate. call->gtFlags |= GTF_CALL_INLINE_CANDIDATE; // Let the strategy know there's another candidate. 
impInlineRoot()->m_inlineStrategy->NoteCandidate(); // Since we're not actually inlining yet, and this call site is // still just an inline candidate, there's nothing to report. inlineResult.SetReported(); } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by target-specific // instructions bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName) { #if defined(TARGET_XARCH) switch (intrinsicName) { // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1 // instructions to directly compute round/ceiling/floor/truncate. case NI_System_Math_Abs: case NI_System_Math_Sqrt: return true; case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: return compOpportunisticallyDependsOn(InstructionSet_SSE41); case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_FMA); default: return false; } #elif defined(TARGET_ARM64) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: case NI_System_Math_Sqrt: case NI_System_Math_Max: case NI_System_Math_Min: return true; case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_AdvSimd); default: return false; } #elif defined(TARGET_ARM) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Round: case NI_System_Math_Sqrt: return true; default: return false; } #else // TODO: This portion of logic is not implemented for other arch. // The reason for returning true is that on all other arch the only intrinsic // enabled are target intrinsics. return true; #endif } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by calling System.Math // methods. bool Compiler::IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName) { // Currently, if a math intrinsic is not implemented by target-specific // instructions, it will be implemented by a System.Math call. In the // future, if we turn to implementing some of them with helper calls, // this predicate needs to be revisited. 
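    // For example, on x86/x64 NI_System_Math_Floor is a target intrinsic only when SSE4.1 is
    // available (see IsTargetIntrinsic above); without it, the intrinsic is expanded as an
    // ordinary call to the System.Math implementation.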
return !IsTargetIntrinsic(intrinsicName); } bool Compiler::IsMathIntrinsic(NamedIntrinsic intrinsicName) { switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_FusedMultiplyAdd: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: case NI_System_Math_Max: case NI_System_Math_Min: case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: case NI_System_Math_Truncate: { assert((intrinsicName > NI_SYSTEM_MATH_START) && (intrinsicName < NI_SYSTEM_MATH_END)); return true; } default: { assert((intrinsicName < NI_SYSTEM_MATH_START) || (intrinsicName > NI_SYSTEM_MATH_END)); return false; } } } bool Compiler::IsMathIntrinsic(GenTree* tree) { return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->AsIntrinsic()->gtIntrinsicName); } //------------------------------------------------------------------------ // impDevirtualizeCall: Attempt to change a virtual vtable call into a // normal call // // Arguments: // call -- the call node to examine/modify // pResolvedToken -- [IN] the resolved token used to create the call. Used for R2R. // method -- [IN/OUT] the method handle for call. Updated iff call devirtualized. // methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized. // pContextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized. // pExactContextHandle -- [OUT] updated context handle iff call devirtualized // isLateDevirtualization -- if devirtualization is happening after importation // isExplicitTailCalll -- [IN] true if we plan on using an explicit tail call // ilOffset -- IL offset of the call // // Notes: // Virtual calls in IL will always "invoke" the base class method. // // This transformation looks for evidence that the type of 'this' // in the call is exactly known, is a final class or would invoke // a final method, and if that and other safety checks pan out, // modifies the call and the call info to create a direct call. // // This transformation is initially done in the importer and not // in some subsequent optimization pass because we want it to be // upstream of inline candidate identification. // // However, later phases may supply improved type information that // can enable further devirtualization. We currently reinvoke this // code after inlining, if the return value of the inlined call is // the 'this obj' of a subsequent virtual call. // // If devirtualization succeeds and the call's this object is a // (boxed) value type, the jit will ask the EE for the unboxed entry // point. If this exists, the jit will invoke the unboxed entry // on the box payload. In addition if the boxing operation is // visible to the jit and the call is the only consmer of the box, // the jit will try analyze the box to see if the call can be instead // instead made on a local copy. If that is doable, the call is // updated to invoke the unboxed entry on the local copy and the // boxing operation is removed. 
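//    For example (illustrative types): a 'callvirt Base::M()' where the jit can prove 'this' is
//    exactly a final class Derived becomes a direct 'call Derived::M()'; an explicit null check
//    is added only if the receiver is not already known to be non-null.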
// // When guarded devirtualization is enabled, this method will mark // calls as guarded devirtualization candidates, if the type of `this` // is not exactly known, and there is a plausible guess for the type. void Compiler::impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* pContextHandle, CORINFO_CONTEXT_HANDLE* pExactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset) { assert(call != nullptr); assert(method != nullptr); assert(methodFlags != nullptr); assert(pContextHandle != nullptr); // This should be a virtual vtable or virtual stub call. // assert(call->IsVirtual()); // Possibly instrument. Note for OSR+PGO we will instrument when // optimizing and (currently) won't devirtualize. We may want // to revisit -- if we can devirtualize we should be able to // suppress the probe. // // We strip BBINSTR from inlinees currently, so we'll only // do this for the root method calls. // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { assert(opts.OptimizationDisabled() || opts.IsOSR()); assert(!compIsForInlining()); // During importation, optionally flag this block as one that // contains calls requiring class profiling. Ideally perhaps // we'd just keep track of the calls themselves, so we don't // have to search for them later. // if ((call->gtCallType != CT_INDIRECT) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && (JitConfig.JitClassProfiling() > 0) && !isLateDevirtualization) { JITDUMP("\n ... marking [%06u] in " FMT_BB " for class profile instrumentation\n", dspTreeID(call), compCurBB->bbNum); ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; // Record some info needed for the class profiling probe. // pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; // Flag block as needing scrutiny // compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return; } // Bail if optimizations are disabled. if (opts.OptimizationDisabled()) { return; } #if defined(DEBUG) // Bail if devirt is disabled. if (JitConfig.JitEnableDevirtualization() == 0) { return; } // Optionally, print info on devirtualization Compiler* const rootCompiler = impInlineRoot(); const bool doPrint = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodName, rootCompiler->info.compClassName, &rootCompiler->info.compMethodInfo->args); #endif // DEBUG // Fetch information about the virtual method we're calling. CORINFO_METHOD_HANDLE baseMethod = *method; unsigned baseMethodAttribs = *methodFlags; if (baseMethodAttribs == 0) { // For late devirt we may not have method attributes, so fetch them. baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); } else { #if defined(DEBUG) // Validate that callInfo has up to date method flags const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); // All the base method attributes should agree, save that // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1 // because of concurrent jitting activity. // // Note we don't look at this particular flag bit below, and // later on (if we do try and inline) we will rediscover why // the method can't be inlined, so there's no danger here in // seeing this particular flag bit in different states between // the cached and fresh values. 
if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE)) { assert(!"mismatched method attributes"); } #endif // DEBUG } // In R2R mode, we might see virtual stub calls to // non-virtuals. For instance cases where the non-virtual method // is in a different assembly but is called via CALLVIRT. For // verison resilience we must allow for the fact that the method // might become virtual in some update. // // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a // regular call+nullcheck upstream, so we won't reach this // point. if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0) { assert(call->IsVirtualStub()); assert(opts.IsReadyToRun()); JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n"); return; } // Fetch information about the class that introduced the virtual method. CORINFO_CLASS_HANDLE baseClass = info.compCompHnd->getMethodClass(baseMethod); const DWORD baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass); // Is the call an interface call? const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0; // See what we know about the type of 'this' in the call. GenTree* thisObj = call->gtCallThisArg->GetNode()->gtEffectiveVal(false); bool isExact = false; bool objIsNonNull = false; CORINFO_CLASS_HANDLE objClass = gtGetClassHandle(thisObj, &isExact, &objIsNonNull); // Bail if we know nothing. if (objClass == NO_CLASS_HANDLE) { JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet())); // Don't try guarded devirtualiztion when we're doing late devirtualization. // if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG("unknown")); return; } // If the objClass is sealed (final), then we may be able to devirtualize. const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass); const bool objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0; #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; const char* objClassNote = "[?]"; const char* objClassName = "?objClass"; const char* baseClassName = "?baseClass"; const char* baseMethodName = "?baseMethod"; if (verbose || doPrint) { objClassNote = isExact ? " [exact]" : objClassIsFinal ? " [final]" : ""; objClassName = info.compCompHnd->getClassName(objClass); baseClassName = info.compCompHnd->getClassName(baseClass); baseMethodName = eeGetMethodName(baseMethod, nullptr); if (verbose) { printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n" " class for 'this' is %s%s (attrib %08x)\n" " base method is %s::%s\n", callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName); } } #endif // defined(DEBUG) // See if the jit's best type for `obj` is an interface. // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal // IL_021d: ldloc.0 // IL_021e: callvirt instance int32 System.Object::GetHashCode() // // If so, we can't devirtualize, but we may be able to do guarded devirtualization. // if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0) { // Don't try guarded devirtualiztion when we're doing late devirtualization. 
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // If we get this far, the jit has a lower bound class type for the `this` object being used for dispatch. // It may or may not know enough to devirtualize... if (isInterface) { assert(call->IsVirtualStub()); JITDUMP("--- base class is interface\n"); } // Fetch the method that would be called based on the declared type of 'this', // and prepare to fetch the method attributes. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = objClass; dvInfo.context = *pContextHandle; dvInfo.detail = CORINFO_DEVIRTUALIZATION_UNKNOWN; dvInfo.pResolvedTokenVirtualMethod = pResolvedToken; info.compCompHnd->resolveVirtualMethod(&dvInfo); CORINFO_METHOD_HANDLE derivedMethod = dvInfo.devirtualizedMethod; CORINFO_CONTEXT_HANDLE exactContext = dvInfo.exactContext; CORINFO_CLASS_HANDLE derivedClass = NO_CLASS_HANDLE; CORINFO_RESOLVED_TOKEN* pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedMethod; if (derivedMethod != nullptr) { assert(exactContext != nullptr); assert(((size_t)exactContext & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); derivedClass = (CORINFO_CLASS_HANDLE)((size_t)exactContext & ~CORINFO_CONTEXTFLAGS_MASK); } DWORD derivedMethodAttribs = 0; bool derivedMethodIsFinal = false; bool canDevirtualize = false; #if defined(DEBUG) const char* derivedClassName = "?derivedClass"; const char* derivedMethodName = "?derivedMethod"; const char* note = "inexact or not final"; #endif // If we failed to get a method handle, we can't directly devirtualize. // // This can happen when prejitting, if the devirtualization crosses // servicing bubble boundaries, or if objClass is a shared class. // if (derivedMethod == nullptr) { JITDUMP("--- no derived method: %s\n", devirtualizationDetailToString(dvInfo.detail)); } else { // Fetch method attributes to see if method is marked final. derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod); derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0); #if defined(DEBUG) if (isExact) { note = "exact"; } else if (objClassIsFinal) { note = "final class"; } else if (derivedMethodIsFinal) { note = "final method"; } if (verbose || doPrint) { derivedMethodName = eeGetMethodName(derivedMethod, nullptr); derivedClassName = eeGetClassName(derivedClass); if (verbose) { printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note); gtDispTree(call); } } #endif // defined(DEBUG) canDevirtualize = isExact || objClassIsFinal || (!isInterface && derivedMethodIsFinal); } // We still might be able to do a guarded devirtualization. // Note the call might be an interface call or a virtual call. // if (!canDevirtualize) { JITDUMP(" Class not final or exact%s\n", isInterface ? "" : ", and method not final"); #if defined(DEBUG) // If we know the object type exactly, we generally expect we can devirtualize. // (don't when doing late devirt as we won't have an owner type (yet)) // if (!isLateDevirtualization && (isExact || objClassIsFinal) && JitConfig.JitNoteFailedExactDevirtualization()) { printf("@@@ Exact/Final devirt failure in %s at [%06u] $ %s\n", info.compFullName, dspTreeID(call), devirtualizationDetailToString(dvInfo.detail)); } #endif // Don't try guarded devirtualiztion if we're doing late devirtualization. 
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // All checks done. Time to transform the call. // // We should always have an exact class context. // // Note that wouldnt' be true if the runtime side supported array interface devirt, // the resulting method would be a generic method of the non-generic SZArrayHelper class. // assert(canDevirtualize); JITDUMP(" %s; can devirtualize\n", note); // Make the updates. call->gtFlags &= ~GTF_CALL_VIRT_VTABLE; call->gtFlags &= ~GTF_CALL_VIRT_STUB; call->gtCallMethHnd = derivedMethod; call->gtCallType = CT_USER_FUNC; call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED; // Virtual calls include an implicit null check, which we may // now need to make explicit. if (!objIsNonNull) { call->gtFlags |= GTF_CALL_NULLCHECK; } // Clear the inline candidate info (may be non-null since // it's a union field used for other things by virtual // stubs) call->gtInlineCandidateInfo = nullptr; #if defined(DEBUG) if (verbose) { printf("... after devirt...\n"); gtDispTree(call); } if (doPrint) { printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName, baseMethodName, derivedClassName, derivedMethodName, note); } // If we successfully devirtualized based on an exact or final class, // and we have dynamic PGO data describing the likely class, make sure they agree. // // If pgo source is not dynamic we may see likely classes from other versions of this code // where types had different properties. // // If method is an inlinee we may be specializing to a class that wasn't seen at runtime. // const bool canSensiblyCheck = (isExact || objClassIsFinal) && (fgPgoSource == ICorJitInfo::PgoSource::Dynamic) && !compIsForInlining(); if (JitConfig.JitCrossCheckDevirtualizationAndPGO() && canSensiblyCheck) { // We only can handle a single likely class for now const int maxLikelyClasses = 1; LikelyClassRecord likelyClasses[maxLikelyClasses]; UINT32 numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); UINT32 likelihood = likelyClasses[0].likelihood; CORINFO_CLASS_HANDLE likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses > 0) { // PGO had better agree the class we devirtualized to is plausible. // if (likelyClass != derivedClass) { // Managed type system may report different addresses for a class handle // at different times....? // // Also, AOT may have a more nuanced notion of class equality. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { bool mismatch = true; // derivedClass will be the introducer of derived method, so it's possible // likelyClass is a non-overriding subclass. Check up the hierarchy. 
// CORINFO_CLASS_HANDLE parentClass = likelyClass; while (parentClass != NO_CLASS_HANDLE) { if (parentClass == derivedClass) { mismatch = false; break; } parentClass = info.compCompHnd->getParentType(parentClass); } if (mismatch || (numberOfClasses != 1) || (likelihood != 100)) { printf("@@@ Likely %p (%s) != Derived %p (%s) [n=%u, l=%u, il=%u] in %s \n", likelyClass, eeGetClassName(likelyClass), derivedClass, eeGetClassName(derivedClass), numberOfClasses, likelihood, ilOffset, info.compFullName); } assert(!(mismatch || (numberOfClasses != 1) || (likelihood != 100))); } } } } #endif // defined(DEBUG) // If the 'this' object is a value class, see if we can rework the call to invoke the // unboxed entry. This effectively inlines the normally un-inlineable wrapper stub // and exposes the potentially inlinable unboxed entry method. // // We won't optimize explicit tail calls, as ensuring we get the right tail call info // is tricky (we'd need to pass an updated sig and resolved token back to some callers). // // Note we may not have a derived class in some cases (eg interface call on an array) // if (info.compCompHnd->isValueClass(derivedClass)) { if (isExplicitTailCall) { JITDUMP("Have a direct explicit tail call to boxed entry point; can't optimize further\n"); } else { JITDUMP("Have a direct call to boxed entry point. Trying to optimize to call an unboxed entry point\n"); // Note for some shared methods the unboxed entry point requires an extra parameter. bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethod = info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg); if (unboxedEntryMethod != nullptr) { bool optimizedTheBox = false; // If the 'this' object is a local box, see if we can revise things // to not require boxing. // if (thisObj->IsBoxedValue() && !isExplicitTailCall) { // Since the call is the only consumer of the box, we know the box can't escape // since it is being passed an interior pointer. // // So, revise the box to simply create a local copy, use the address of that copy // as the this pointer, and update the entry point to the unboxed entry. // // Ideally, we then inline the boxed method and and if it turns out not to modify // the copy, we can undo the copy too. if (requiresInstMethodTableArg) { // Perform a trial box removal and ask for the type handle tree that fed the box. // JITDUMP("Unboxed entry needs method table arg...\n"); GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE); if (methodTableArg != nullptr) { // If that worked, turn the box into a copy to a local var // JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg)); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { // Pass the local var as this and the type handle as a new arg // JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table " "arg\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. 
// if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } call->gtCallMethHnd = unboxedEntryMethod; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethodAttribs = unboxedMethodAttribs; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n"); } } else { JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n"); } } else { JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n"); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { JITDUMP("Success! invoking unboxed entry point on local copy\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box\n"); } } if (optimizedTheBox) { #if FEATURE_TAILCALL_OPT if (call->IsImplicitTailCall()) { JITDUMP("Clearing the implicit tail call flag\n"); // If set, we clear the implicit tail call flag // as we just introduced a new address taken local variable // call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; } #endif // FEATURE_TAILCALL_OPT } } if (!optimizedTheBox) { // If we get here, we have a boxed value class that either wasn't boxed // locally, or was boxed locally but we were unable to remove the box for // various reasons. // // We can still update the call to invoke the unboxed entry, if the // boxed value is simple. // if (requiresInstMethodTableArg) { // Get the method table from the boxed object. // GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const clonedThisArg = gtClone(thisArg); if (clonedThisArg == nullptr) { JITDUMP( "unboxed entry needs MT arg, but `this` was too complex to clone. Deferring update.\n"); } else { JITDUMP("revising call to invoke unboxed entry with additional method table arg\n"); GenTree* const methodTableArg = gtNewMethodTableLookup(clonedThisArg); // Update the 'this' pointer to refer to the box payload // GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; derivedMethodAttribs = unboxedMethodAttribs; // Add the method table argument. 
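                    // (This mirrors the argument-placement logic used above for the locally-boxed case.)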
// // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. // if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } } } else { JITDUMP("revising call to invoke unboxed entry\n"); GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; } } } else { // Many of the low-level methods on value classes won't have unboxed entries, // as they need access to the type of the object. // // Note this may be a cue for us to stack allocate the boxed object, since // we probably know that these objects don't escape. JITDUMP("Sorry, failed to find unboxed entry point\n"); } } } // Need to update call info too. // *method = derivedMethod; *methodFlags = derivedMethodAttribs; // Update context handle // *pContextHandle = MAKE_METHODCONTEXT(derivedMethod); // Update exact context handle. // if (pExactContextHandle != nullptr) { *pExactContextHandle = MAKE_CLASSCONTEXT(derivedClass); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // For R2R, getCallInfo triggers bookkeeping on the zap // side and acquires the actual symbol to call so we need to call it here. // Look up the new call info. CORINFO_CALL_INFO derivedCallInfo; eeGetCallInfo(pDerivedResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, &derivedCallInfo); // Update the call. call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT; call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT; call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup); } #endif // FEATURE_READYTORUN } //------------------------------------------------------------------------ // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call // to an intrinsic returns an exact type // // Arguments: // methodHnd -- handle for the special intrinsic method // // Returns: // Exact class handle returned by the intrinsic call, if known. // Nullptr if not known, or not likely to lead to beneficial optimization. CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd) { JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd)); CORINFO_CLASS_HANDLE result = nullptr; // See what intrinisc we have... const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd); switch (ni) { case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: { // Expect one class generic parameter; figure out which it is. 
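            // For example, for EqualityComparer<string>.Default the single class instantiation
            // argument is System.String; because String is final, the VM can report the exact
            // comparer class, which later helps devirtualize calls made on the returned comparer.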
CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(methodHnd, &sig); assert(sig.sigInst.classInstCount == 1); CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0]; assert(typeHnd != nullptr); // Lookup can incorrect when we have __Canon as it won't appear // to implement any interface types. // // And if we do not have a final type, devirt & inlining is // unlikely to result in much simplification. // // We can use CORINFO_FLG_FINAL to screen out both of these cases. const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd); const bool isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0); if (isFinalType) { if (ni == NI_System_Collections_Generic_EqualityComparer_get_Default) { result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd); } else { assert(ni == NI_System_Collections_Generic_Comparer_get_Default); result = info.compCompHnd->getDefaultComparerClass(typeHnd); } JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd), result != nullptr ? eeGetClassName(result) : "unknown"); } else { JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd)); } break; } default: { JITDUMP("This special intrinsic not handled, sorry...\n"); break; } } return result; } //------------------------------------------------------------------------ // impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it. // // Arguments: // token - init value for the allocated token. // // Return Value: // pointer to token into jit-allocated memory. CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(const CORINFO_RESOLVED_TOKEN& token) { CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1); *memory = token; return memory; } //------------------------------------------------------------------------ // SpillRetExprHelper: iterate through arguments tree and spill ret_expr to local variables. // class SpillRetExprHelper { public: SpillRetExprHelper(Compiler* comp) : comp(comp) { } void StoreRetExprResultsInArgs(GenTreeCall* call) { for (GenTreeCall::Use& use : call->Args()) { comp->fgWalkTreePre(&use.NodeRef(), SpillRetExprVisitor, this); } if (call->gtCallThisArg != nullptr) { comp->fgWalkTreePre(&call->gtCallThisArg->NodeRef(), SpillRetExprVisitor, this); } } private: static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre) { assert((pTree != nullptr) && (*pTree != nullptr)); GenTree* tree = *pTree; if ((tree->gtFlags & GTF_CALL) == 0) { // Trees with ret_expr are marked as GTF_CALL. 
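            // A subtree without GTF_CALL therefore cannot contain a GT_RET_EXPR, so the walk can skip it.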
return Compiler::WALK_SKIP_SUBTREES; } if (tree->OperGet() == GT_RET_EXPR) { SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData); walker->StoreRetExprAsLocalVar(pTree); } return Compiler::WALK_CONTINUE; } void StoreRetExprAsLocalVar(GenTree** pRetExpr) { GenTree* retExpr = *pRetExpr; assert(retExpr->OperGet() == GT_RET_EXPR); const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr")); JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp); comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE); *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet()); if (retExpr->TypeGet() == TYP_REF) { assert(comp->lvaTable[tmp].lvSingleDef == 0); comp->lvaTable[tmp].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE retClsHnd = comp->gtGetClassHandle(retExpr, &isExact, &isNonNull); if (retClsHnd != nullptr) { comp->lvaSetClass(tmp, retClsHnd, isExact); } } } private: Compiler* comp; }; //------------------------------------------------------------------------ // addFatPointerCandidate: mark the call and the method, that they have a fat pointer candidate. // Spill ret_expr in the call node, because they can't be cloned. // // Arguments: // call - fat calli candidate // void Compiler::addFatPointerCandidate(GenTreeCall* call) { JITDUMP("Marking call [%06u] as fat pointer candidate\n", dspTreeID(call)); setMethodHasFatPointer(); call->SetFatPointerCandidate(); SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); } //------------------------------------------------------------------------ // considerGuardedDevirtualization: see if we can profitably guess at the // class involved in an interface or virtual call. // // Arguments: // // call - potential guarded devirtualization candidate // ilOffset - IL ofset of the call instruction // isInterface - true if this is an interface call // baseMethod - target method of the call // baseClass - class that introduced the target method // pContextHandle - context handle for the call // objClass - class of 'this' in the call // objClassName - name of the obj Class // // Notes: // Consults with VM to see if there's a likely class at runtime, // if so, adds a candidate for guarded devirtualization. // void Compiler::considerGuardedDevirtualization( GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)) { #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; #endif JITDUMP("Considering guarded devirtualization at IL offset %u (0x%x)\n", ilOffset, ilOffset); // We currently only get likely class guesses when there is PGO data // with class profiles. // if (fgPgoClassProfiles == 0) { JITDUMP("Not guessing for class: no class profile pgo data, or pgo disabled\n"); return; } // See if there's a likely guess for the class. // const unsigned likelihoodThreshold = isInterface ? 25 : 30; unsigned likelihood = 0; unsigned numberOfClasses = 0; CORINFO_CLASS_HANDLE likelyClass = NO_CLASS_HANDLE; bool doRandomDevirt = false; const int maxLikelyClasses = 32; LikelyClassRecord likelyClasses[maxLikelyClasses]; #ifdef DEBUG // Optional stress mode to pick a random known class, rather than // the most likely known class. 
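    // (Driven by the JitRandomGuardedDevirtualization config knob; handy for exercising the
    // guard's failure path even when the profile strongly favors one class.)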
// doRandomDevirt = JitConfig.JitRandomGuardedDevirtualization() != 0; if (doRandomDevirt) { // Reuse the random inliner's random state. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomGuardedDevirtualization()); likelyClasses[0].clsHandle = getRandomClass(fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset, random); likelyClasses[0].likelihood = 100; if (likelyClasses[0].clsHandle != NO_CLASS_HANDLE) { numberOfClasses = 1; } } else #endif { numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); } // For now we only use the most popular type likelihood = likelyClasses[0].likelihood; likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses < 1) { JITDUMP("No likely class, sorry\n"); return; } assert(likelyClass != NO_CLASS_HANDLE); // Print all likely classes JITDUMP("%s classes for %p (%s):\n", doRandomDevirt ? "Random" : "Likely", dspPtr(objClass), objClassName) for (UINT32 i = 0; i < numberOfClasses; i++) { JITDUMP(" %u) %p (%s) [likelihood:%u%%]\n", i + 1, likelyClasses[i].clsHandle, eeGetClassName(likelyClasses[i].clsHandle), likelyClasses[i].likelihood); } // Todo: a more advanced heuristic using likelihood, number of // classes, and the profile count for this block. // // For now we will guess if the likelihood is at least 25%/30% (intfc/virt), as studies // have shown this transformation should pay off even if we guess wrong sometimes. // if (likelihood < likelihoodThreshold) { JITDUMP("Not guessing for class; likelihood is below %s call threshold %u\n", callKind, likelihoodThreshold); return; } uint32_t const likelyClassAttribs = info.compCompHnd->getClassAttribs(likelyClass); if ((likelyClassAttribs & CORINFO_FLG_ABSTRACT) != 0) { // We may see an abstract likely class, if we have a stale profile. // No point guessing for this. // JITDUMP("Not guessing for class; abstract (stale profile)\n"); return; } // Figure out which method will be called. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = likelyClass; dvInfo.context = *pContextHandle; dvInfo.exactContext = *pContextHandle; dvInfo.pResolvedTokenVirtualMethod = nullptr; const bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo); if (!canResolve) { JITDUMP("Can't figure out which method would be invoked, sorry\n"); return; } CORINFO_METHOD_HANDLE likelyMethod = dvInfo.devirtualizedMethod; JITDUMP("%s call would invoke method %s\n", callKind, eeGetMethodName(likelyMethod, nullptr)); // Add this as a potential candidate. // uint32_t const likelyMethodAttribs = info.compCompHnd->getMethodAttribs(likelyMethod); addGuardedDevirtualizationCandidate(call, likelyMethod, likelyClass, likelyMethodAttribs, likelyClassAttribs, likelihood); } //------------------------------------------------------------------------ // addGuardedDevirtualizationCandidate: potentially mark the call as a guarded // devirtualization candidate // // Notes: // // Call sites in rare or unoptimized code, and calls that require cookies are // not marked as candidates. // // As part of marking the candidate, the code spills GT_RET_EXPRs anywhere in any // child tree, because and we need to clone all these trees when we clone the call // as part of guarded devirtualization, and these IR nodes can't be cloned. 
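//    The guard itself is materialized in a later phase; conceptually it compares the object's
//    method table against 'classHandle' and, on a match, runs a direct (and possibly inlined)
//    call, falling back to the original virtual/interface call otherwise.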
// // Arguments: // call - potential guarded devirtualization candidate // methodHandle - method that will be invoked if the class test succeeds // classHandle - class that will be tested for at runtime // methodAttr - attributes of the method // classAttr - attributes of the class // likelihood - odds that this class is the class seen at runtime // void Compiler::addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood) { // This transformation only makes sense for virtual calls assert(call->IsVirtual()); // Only mark calls if the feature is enabled. const bool isEnabled = JitConfig.JitEnableGuardedDevirtualization() > 0; if (!isEnabled) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- disabled by jit config\n", dspTreeID(call)); return; } // Bail if not optimizing or the call site is very likely cold if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n", dspTreeID(call)); return; } // CT_INDIRECT calls may use the cookie, bail if so... // // If transforming these provides a benefit, we could save this off in the same way // we save the stub address below. if ((call->gtCallType == CT_INDIRECT) && (call->AsCall()->gtCallCookie != nullptr)) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- CT_INDIRECT with cookie\n", dspTreeID(call)); return; } #ifdef DEBUG // See if disabled by range // static ConfigMethodRange JitGuardedDevirtualizationRange; JitGuardedDevirtualizationRange.EnsureInit(JitConfig.JitGuardedDevirtualizationRange()); assert(!JitGuardedDevirtualizationRange.Error()); if (!JitGuardedDevirtualizationRange.Contains(impInlineRoot()->info.compMethodHash())) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- excluded by " "JitGuardedDevirtualizationRange", dspTreeID(call)); return; } #endif // We're all set, proceed with candidate creation. // JITDUMP("Marking call [%06u] as guarded devirtualization candidate; will guess for class %s\n", dspTreeID(call), eeGetClassName(classHandle)); setMethodHasGuardedDevirtualization(); call->SetGuardedDevirtualizationCandidate(); // Spill off any GT_RET_EXPR subtrees so we can clone the call. // SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); // Gather some information for later. Note we actually allocate InlineCandidateInfo // here, as the devirtualized half of this call will likely become an inline candidate. // GuardedDevirtualizationCandidateInfo* pInfo = new (this, CMK_Inlining) InlineCandidateInfo; pInfo->guardedMethodHandle = methodHandle; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->guardedClassHandle = classHandle; pInfo->likelihood = likelihood; pInfo->requiresInstMethodTableArg = false; // If the guarded class is a value class, look for an unboxed entry point. // if ((classAttr & CORINFO_FLG_VALUECLASS) != 0) { JITDUMP(" ... class is a value class, looking for unboxed entry\n"); bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethodHandle = info.compCompHnd->getUnboxedEntry(methodHandle, &requiresInstMethodTableArg); if (unboxedEntryMethodHandle != nullptr) { JITDUMP(" ... 
updating GDV candidate with unboxed entry info\n"); pInfo->guardedMethodUnboxedEntryHandle = unboxedEntryMethodHandle; pInfo->requiresInstMethodTableArg = requiresInstMethodTableArg; } } call->gtGuardedDevirtualizationCandidateInfo = pInfo; } void Compiler::addExpRuntimeLookupCandidate(GenTreeCall* call) { setMethodHasExpRuntimeLookup(); call->SetExpRuntimeLookup(); } //------------------------------------------------------------------------ // impIsClassExact: check if a class handle can only describe values // of exactly one class. // // Arguments: // classHnd - handle for class in question // // Returns: // true if class is final and not subject to special casting from // variance or similar. // // Note: // We are conservative on arrays of primitive types here. bool Compiler::impIsClassExact(CORINFO_CLASS_HANDLE classHnd) { DWORD flags = info.compCompHnd->getClassAttribs(classHnd); DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY; if ((flags & flagsMask) == CORINFO_FLG_FINAL) { return true; } if ((flags & flagsMask) == (CORINFO_FLG_FINAL | CORINFO_FLG_ARRAY)) { CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType type = info.compCompHnd->getChildType(classHnd, &arrayElementHandle); if ((type == CORINFO_TYPE_CLASS) || (type == CORINFO_TYPE_VALUECLASS)) { return impIsClassExact(arrayElementHandle); } } return false; } //------------------------------------------------------------------------ // impCanSkipCovariantStoreCheck: see if storing a ref type value to an array // can skip the array store covariance check. // // Arguments: // value -- tree producing the value to store // array -- tree representing the array to store to // // Returns: // true if the store does not require a covariance check. // bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array) { // We should only call this when optimizing. assert(opts.OptimizationEnabled()); // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j] if (value->OperIs(GT_INDEX) && array->OperIs(GT_LCL_VAR)) { GenTree* valueIndex = value->AsIndex()->Arr(); if (valueIndex->OperIs(GT_LCL_VAR)) { unsigned valueLcl = valueIndex->AsLclVar()->GetLclNum(); unsigned arrayLcl = array->AsLclVar()->GetLclNum(); if ((valueLcl == arrayLcl) && !lvaGetDesc(arrayLcl)->IsAddressExposed()) { JITDUMP("\nstelem of ref from same array: skipping covariant store check\n"); return true; } } } // Check for assignment of NULL. if (value->OperIs(GT_CNS_INT)) { assert(value->gtType == TYP_REF); if (value->AsIntCon()->gtIconVal == 0) { JITDUMP("\nstelem of null: skipping covariant store check\n"); return true; } // Non-0 const refs can only occur with frozen objects assert(value->IsIconHandle(GTF_ICON_STR_HDL)); assert(doesMethodHaveFrozenString() || (compIsForInlining() && impInlineInfo->InlinerCompiler->doesMethodHaveFrozenString())); } // Try and get a class handle for the array if (value->gtType != TYP_REF) { return false; } bool arrayIsExact = false; bool arrayIsNonNull = false; CORINFO_CLASS_HANDLE arrayHandle = gtGetClassHandle(array, &arrayIsExact, &arrayIsNonNull); if (arrayHandle == NO_CLASS_HANDLE) { return false; } // There are some methods in corelib where we're storing to an array but the IL // doesn't reflect this (see SZArrayHelper). Avoid. 
DWORD attribs = info.compCompHnd->getClassAttribs(arrayHandle); if ((attribs & CORINFO_FLG_ARRAY) == 0) { return false; } CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayHandle, &arrayElementHandle); // Verify array type handle is really an array of ref type assert(arrayElemType == CORINFO_TYPE_CLASS); // Check for exactly object[] if (arrayIsExact && (arrayElementHandle == impGetObjectClass())) { JITDUMP("\nstelem to (exact) object[]: skipping covariant store check\n"); return true; } const bool arrayTypeIsSealed = impIsClassExact(arrayElementHandle); if ((!arrayIsExact && !arrayTypeIsSealed) || (arrayElementHandle == NO_CLASS_HANDLE)) { // Bail out if we don't know array's exact type return false; } bool valueIsExact = false; bool valueIsNonNull = false; CORINFO_CLASS_HANDLE valueHandle = gtGetClassHandle(value, &valueIsExact, &valueIsNonNull); // Array's type is sealed and equals to value's type if (arrayTypeIsSealed && (valueHandle == arrayElementHandle)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } // Array's type is not sealed but we know its exact type if (arrayIsExact && (valueHandle != NO_CLASS_HANDLE) && (info.compCompHnd->compareTypesForCast(valueHandle, arrayElementHandle) == TypeCompareState::Must)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } return false; }
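// In short, the covariance check can be skipped for: stores within the same (non-address-exposed)
// array local, stores of null, stores to an array known to be exactly object[], and stores where
// the array's element type is sealed or exactly known and the value's type is known to be compatible.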
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "corexcep.h" #define Verify(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ } \ } while (0) #define VerifyOrReturn(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return; \ } \ } while (0) #define VerifyOrReturnSpeculative(cond, msg, speculative) \ do \ { \ if (speculative) \ { \ if (!(cond)) \ { \ return false; \ } \ } \ else \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return false; \ } \ } \ } while (0) /*****************************************************************************/ void Compiler::impInit() { impStmtList = impLastStmt = nullptr; #ifdef DEBUG impInlinedCodeSize = 0; #endif // DEBUG } /***************************************************************************** * * Pushes the given tree on the stack. */ void Compiler::impPushOnStack(GenTree* tree, typeInfo ti) { /* Check for overflow. If inlining, we may be using a bigger stack */ if ((verCurrentState.esStackDepth >= info.compMaxStack) && (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0))) { BADCODE("stack overflow"); } #ifdef DEBUG // If we are pushing a struct, make certain we know the precise type! 
if (tree->TypeGet() == TYP_STRUCT) { assert(ti.IsType(TI_STRUCT)); CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle(); assert(clsHnd != NO_CLASS_HANDLE); } #endif // DEBUG verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti; verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree; if ((tree->gtType == TYP_LONG) && (compLongUsed == false)) { compLongUsed = true; } else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false)) { compFloatingPointUsed = true; } } inline void Compiler::impPushNullObjRefOnStack() { impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL)); } // This method gets called when we run into unverifiable code // (and we are verifying the method) inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { #ifdef DEBUG const char* tail = strrchr(file, '\\'); if (tail) { file = tail + 1; } if (JitConfig.JitBreakOnUnsafeCode()) { assert(!"Unsafe code detected"); } #endif JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); if (compIsForImportOnly()) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line)); } } inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); #ifdef DEBUG // BreakIfDebuggerPresent(); if (getBreakOnBadCode()) { assert(!"Typechecking error"); } #endif RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr); UNREACHABLE(); } // helper function that will tell us if the IL instruction at the addr passed // by param consumes an address at the top of the stack. We use it to save // us lvAddrTaken bool Compiler::impILConsumesAddr(const BYTE* codeAddr) { assert(!compIsForInlining()); OPCODE opcode; opcode = (OPCODE)getU1LittleEndian(codeAddr); switch (opcode) { // case CEE_LDFLDA: We're taking this one out as if you have a sequence // like // // ldloca.0 // ldflda whatever // // of a primitivelike struct, you end up after morphing with addr of a local // that's not marked as addrtaken, which is wrong. Also ldflda is usually used // for structs that contain other structs, which isnt a case we handle very // well now for other reasons. case CEE_LDFLD: { // We won't collapse small fields. This is probably not the right place to have this // check, but we're only using the function for this purpose, and is easy to factor // out if we need to do so. 
CORINFO_RESOLVED_TOKEN resolvedToken; impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field); var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField)); // Preserve 'small' int types if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } if (varTypeIsSmall(lclTyp)) { return false; } return true; } default: break; } return false; } void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind) { pResolvedToken->tokenContext = impTokenLookupContextHandle; pResolvedToken->tokenScope = info.compScopeHnd; pResolvedToken->token = getU4LittleEndian(addr); pResolvedToken->tokenType = kind; info.compCompHnd->resolveToken(pResolvedToken); } /***************************************************************************** * * Pop one tree from the stack. */ StackEntry Compiler::impPopStack() { if (verCurrentState.esStackDepth == 0) { BADCODE("stack underflow"); } return verCurrentState.esStack[--verCurrentState.esStackDepth]; } /***************************************************************************** * * Peep at n'th (0-based) tree on the top of the stack. */ StackEntry& Compiler::impStackTop(unsigned n) { if (verCurrentState.esStackDepth <= n) { BADCODE("stack underflow"); } return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1]; } unsigned Compiler::impStackHeight() { return verCurrentState.esStackDepth; } /***************************************************************************** * Some of the trees are spilled specially. While unspilling them, or * making a copy, these need to be handled specially. The function * enumerates the operators possible after spilling. */ #ifdef DEBUG // only used in asserts static bool impValidSpilledStackEntry(GenTree* tree) { if (tree->gtOper == GT_LCL_VAR) { return true; } if (tree->OperIsConst()) { return true; } return false; } #endif /***************************************************************************** * * The following logic is used to save/restore stack contents. * If 'copy' is true, then we make a copy of the trees on the stack. These * have to all be cloneable/spilled values. 
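 *
 * Illustrative note: spilled entries are restricted to constants and local variable
 * references (see impValidSpilledStackEntry), so the copying path below can simply
 * clone each tree with gtCloneExpr.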
*/ void Compiler::impSaveStackState(SavedStack* savePtr, bool copy) { savePtr->ssDepth = verCurrentState.esStackDepth; if (verCurrentState.esStackDepth) { savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth]; size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees); if (copy) { StackEntry* table = savePtr->ssTrees; /* Make a fresh copy of all the stack entries */ for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++) { table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo; GenTree* tree = verCurrentState.esStack[level].val; assert(impValidSpilledStackEntry(tree)); switch (tree->gtOper) { case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_LCL_VAR: table->val = gtCloneExpr(tree); break; default: assert(!"Bad oper - Not covered by impValidSpilledStackEntry()"); break; } } } else { memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize); } } } void Compiler::impRestoreStackState(SavedStack* savePtr) { verCurrentState.esStackDepth = savePtr->ssDepth; if (verCurrentState.esStackDepth) { memcpy(verCurrentState.esStack, savePtr->ssTrees, verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack)); } } //------------------------------------------------------------------------ // impBeginTreeList: Get the tree list started for a new basic block. // inline void Compiler::impBeginTreeList() { assert(impStmtList == nullptr && impLastStmt == nullptr); } /***************************************************************************** * * Store the given start and end stmt in the given basic block. This is * mostly called by impEndTreeList(BasicBlock *block). It is called * directly only for handling CEE_LEAVEs out of finally-protected try's. */ inline void Compiler::impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt) { /* Make the list circular, so that we can easily walk it backwards */ firstStmt->SetPrevStmt(lastStmt); /* Store the tree list in the basic block */ block->bbStmtList = firstStmt; /* The block should not already be marked as imported */ assert((block->bbFlags & BBF_IMPORTED) == 0); block->bbFlags |= BBF_IMPORTED; } inline void Compiler::impEndTreeList(BasicBlock* block) { if (impStmtList == nullptr) { // The block should not already be marked as imported. assert((block->bbFlags & BBF_IMPORTED) == 0); // Empty block. Just mark it as imported. block->bbFlags |= BBF_IMPORTED; } else { impEndTreeList(block, impStmtList, impLastStmt); } #ifdef DEBUG if (impLastILoffsStmt != nullptr) { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } #endif impStmtList = impLastStmt = nullptr; } /***************************************************************************** * * Check that storing the given tree doesnt mess up the semantic order. Note * that this has only limited value as we can only check [0..chkLevel). 
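 *
 * Illustrative example: when appending an assignment to local V02 (hypothetical), the
 * check asserts that none of the still-pending stack entries in [0..chkLevel) reference V02.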
*/ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) { #ifndef DEBUG return; #else if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE) { return; } GenTree* tree = stmt->GetRootNode(); // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack if (tree->gtFlags & GTF_CALL) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0); } } if (tree->gtOper == GT_ASG) { // For an assignment to a local variable, all references of that // variable have to be spilled. If it is aliased, all calls and // indirect accesses have to be spilled if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); for (unsigned level = 0; level < chkLevel; level++) { assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum)); assert(!lvaTable[lclNum].IsAddressExposed() || (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0); } } // If the access may be to global memory, all side effects have to be spilled. else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0); } } } #endif } //------------------------------------------------------------------------ // impAppendStmt: Append the given statement to the current block's tree list. // // // Arguments: // stmt - The statement to add. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo) { if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE)) { assert(chkLevel <= verCurrentState.esStackDepth); /* If the statement being appended has any side-effects, check the stack to see if anything needs to be spilled to preserve correct ordering. */ GenTree* expr = stmt->GetRootNode(); GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT; // Assignment to (unaliased) locals don't count as a side-effect as // we handle them specially using impSpillLclRefs(). Temp locals should // be fine too. 
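        // Illustrative example: appending "V03 = CALL C()" (hypothetical temp and callee)
        // drops the GTF_ASG contributed by the store to V03 itself, so only the call's own
        // global side effects drive the spilling decision below.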
if ((expr->gtOper == GT_ASG) && (expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && ((expr->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) == 0) && !gtHasLocalsWithAddrOp(expr->AsOp()->gtOp2)) { GenTreeFlags op2Flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT; assert(flags == (op2Flags | GTF_ASG)); flags = op2Flags; } if (flags != 0) { bool spillGlobEffects = false; if ((flags & GTF_CALL) != 0) { // If there is a call, we have to spill global refs spillGlobEffects = true; } else if (!expr->OperIs(GT_ASG)) { if ((flags & GTF_ASG) != 0) { // The expression is not an assignment node but it has an assignment side effect, it // must be an atomic op, HW intrinsic or some other kind of node that stores to memory. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } } else { GenTree* lhs = expr->gtGetOp1(); GenTree* rhs = expr->gtGetOp2(); if (((rhs->gtFlags | lhs->gtFlags) & GTF_ASG) != 0) { // Either side of the assignment node has an assignment side effect. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } else if ((lhs->gtFlags & GTF_GLOB_REF) != 0) { spillGlobEffects = true; } } impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt")); } else { impSpillSpecialSideEff(); } } impAppendStmtCheck(stmt, chkLevel); impAppendStmt(stmt); #ifdef FEATURE_SIMD impMarkContiguousSIMDFieldAssignments(stmt); #endif // Once we set the current offset as debug info in an appended tree, we are // ready to report the following offsets. Note that we need to compare // offsets here instead of debug info, since we do not set the "is call" // bit in impCurStmtDI. if (checkConsumedDebugInfo && (impLastStmt->GetDebugInfo().GetLocation().GetOffset() == impCurStmtDI.GetLocation().GetOffset())) { impCurStmtOffsSet(BAD_IL_OFFSET); } #ifdef DEBUG if (impLastILoffsStmt == nullptr) { impLastILoffsStmt = stmt; } if (verbose) { printf("\n\n"); gtDispStmt(stmt); } #endif } //------------------------------------------------------------------------ // impAppendStmt: Add the statement to the current stmts list. // // Arguments: // stmt - the statement to add. // inline void Compiler::impAppendStmt(Statement* stmt) { if (impStmtList == nullptr) { // The stmt is the first in the list. impStmtList = stmt; } else { // Append the expression statement to the existing list. impLastStmt->SetNextStmt(stmt); stmt->SetPrevStmt(impLastStmt); } impLastStmt = stmt; } //------------------------------------------------------------------------ // impExtractLastStmt: Extract the last statement from the current stmts list. // // Return Value: // The extracted statement. // // Notes: // It assumes that the stmt will be reinserted later. // Statement* Compiler::impExtractLastStmt() { assert(impLastStmt != nullptr); Statement* stmt = impLastStmt; impLastStmt = impLastStmt->GetPrevStmt(); if (impLastStmt == nullptr) { impStmtList = nullptr; } return stmt; } //------------------------------------------------------------------------- // impInsertStmtBefore: Insert the given "stmt" before "stmtBefore". // // Arguments: // stmt - a statement to insert; // stmtBefore - an insertion point to insert "stmt" before. 
// inline void Compiler::impInsertStmtBefore(Statement* stmt, Statement* stmtBefore) { assert(stmt != nullptr); assert(stmtBefore != nullptr); if (stmtBefore == impStmtList) { impStmtList = stmt; } else { Statement* stmtPrev = stmtBefore->GetPrevStmt(); stmt->SetPrevStmt(stmtPrev); stmtPrev->SetNextStmt(stmt); } stmt->SetNextStmt(stmtBefore); stmtBefore->SetPrevStmt(stmt); } //------------------------------------------------------------------------ // impAppendTree: Append the given expression tree to the current block's tree list. // // // Arguments: // tree - The tree that will be the root of the newly created statement. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // di - Debug information to associate with the statement. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // // Return value: // The newly created statement. // Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo) { assert(tree); /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impAppendStmt(stmt, chkLevel, checkConsumedDebugInfo); return stmt; } /***************************************************************************** * * Insert the given expression tree before "stmtBefore" */ void Compiler::impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore) { /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impInsertStmtBefore(stmt, stmtBefore); } /***************************************************************************** * * Append an assignment of the given value to a temp to the current tree list. * curLevel is the stack level for which the spill to the temp is being done. */ void Compiler::impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg = gtNewTempAssign(tmp, val); if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * same as above, but handle the valueclass case too */ void Compiler::impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structType, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg; assert(val->TypeGet() != TYP_STRUCT || structType != NO_CLASS_HANDLE); if (varTypeIsStruct(val) && (structType != NO_CLASS_HANDLE)) { assert(tmpNum < lvaCount); assert(structType != NO_CLASS_HANDLE); // if the method is non-verifiable the assert is not true // so at least ignore it in the case when verification is turned on // since any block that tries to use the temp would have failed verification. 
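        // Illustrative example: if the value being assigned is a Vector128<float>, the
        // lvaSetStruct call below may retype the temp from TYP_UNDEF to TYP_SIMD16, and the
        // destination LCL_VAR node is then created with that updated type.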
var_types varType = lvaTable[tmpNum].lvType; assert(varType == TYP_UNDEF || varTypeIsStruct(varType)); lvaSetStruct(tmpNum, structType, false); varType = lvaTable[tmpNum].lvType; // Now, set the type of the struct value. Note that lvaSetStruct may modify the type // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType) // that has been passed in for the value being assigned to the temp, in which case we // need to set 'val' to that same type. // Note also that if we always normalized the types of any node that might be a struct // type, this would not be necessary - but that requires additional JIT/EE interface // calls that may not actually be required - e.g. if we only access a field of a struct. GenTree* dst = gtNewLclvNode(tmpNum, varType); asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, di, block); } else { asg = gtNewTempAssign(tmpNum, val); } if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * * Pop the given number of values from the stack and return a list node with * their values. * The 'prefixTree' argument may optionally contain an argument * list that is prepended to the list returned from this function. * * The notion of prepended is a bit misleading in that the list is backwards * from the way I would expect: The first element popped is at the end of * the returned list, and prefixTree is 'before' that, meaning closer to * the end of the list. To get to prefixTree, you have to walk to the * end of the list. * * For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as * such we reverse its meaning such that returnValue has a reversed * prefixTree at the head of the list. */ GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs) { assert(sig == nullptr || count == sig->numArgs); CORINFO_CLASS_HANDLE structType; GenTreeCall::Use* argList; if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { argList = nullptr; } else { // ARG_ORDER_L2R argList = prefixArgs; } while (count--) { StackEntry se = impPopStack(); typeInfo ti = se.seTypeInfo; GenTree* temp = se.val; if (varTypeIsStruct(temp)) { // Morph trees that aren't already OBJs or MKREFANY to be OBJs assert(ti.IsType(TI_STRUCT)); structType = ti.GetClassHandleForValueClass(); bool forceNormalization = false; if (varTypeIsSIMD(temp)) { // We need to ensure that fgMorphArgs will use the correct struct handle to ensure proper // ABI handling of this argument. // Note that this can happen, for example, if we have a SIMD intrinsic that returns a SIMD type // with a different baseType than we've seen. // We also need to ensure an OBJ node if we have a FIELD node that might be transformed to LCL_FLD // or a plain GT_IND. // TODO-Cleanup: Consider whether we can eliminate all of these cases. 
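                // Illustrative example: a TYP_SIMD16 intrinsic node whose inferred handle is
                // Vector128<int> passed where the signature expects Vector128<float> has a
                // handle mismatch, so we force an OBJ wrapper below to make sure fgMorphArgs
                // sees the signature's handle.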
if ((gtGetStructHandleIfPresent(temp) != structType) || temp->OperIs(GT_FIELD)) { forceNormalization = true; } } #ifdef DEBUG if (verbose) { printf("Calling impNormStructVal on:\n"); gtDispTree(temp); } #endif temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL, forceNormalization); #ifdef DEBUG if (verbose) { printf("resulting tree:\n"); gtDispTree(temp); } #endif } /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */ argList = gtPrependNewCallArg(temp, argList); } if (sig != nullptr) { if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggerred from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass); } CORINFO_ARG_LIST_HANDLE sigArgs = sig->args; GenTreeCall::Use* arg; for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--) { PREFIX_ASSUME(arg != nullptr); CORINFO_CLASS_HANDLE classHnd; CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd)); var_types jitSigType = JITtype2varType(corType); if (!impCheckImplicitArgumentCoercion(jitSigType, arg->GetNode()->TypeGet())) { BADCODE("the call argument has a type that can't be implicitly converted to the signature type"); } // insert implied casts (from float to double or double to float) if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT)) { arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE)); } else if ((jitSigType == TYP_FLOAT) && (arg->GetNode()->TypeGet() == TYP_DOUBLE)) { arg->SetNode(gtNewCastNode(TYP_FLOAT, arg->GetNode(), false, TYP_FLOAT)); } // insert any widening or narrowing casts for backwards compatibility arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType)); if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR && corType != CORINFO_TYPE_VAR) { CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs); if (argRealClass != nullptr) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggered from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass); } } const var_types nodeArgType = arg->GetNode()->TypeGet(); if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType)) { assert(!varTypeIsStruct(nodeArgType)); // Some ABI require precise size information for call arguments less than target pointer size, // for example arm64 OSX. Create a special node to keep this information until morph // consumes it into `fgArgInfo`. 
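                // Illustrative example: if the signature parameter is an int16 but the stack
                // node is TYP_INT, the sizes differ (2 vs 4), so the argument is wrapped in
                // GT_PUTARG_TYPE(TYP_SHORT, node) below to preserve the 2-byte size for ABIs
                // (such as Apple arm64) that pack small stack arguments.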
GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode()); arg->SetNode(putArgType); } sigArgs = info.compCompHnd->getArgNext(sigArgs); } } if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { // Prepend the prefixTree // Simple in-place reversal to place treeList // at the end of a reversed prefixTree while (prefixArgs != nullptr) { GenTreeCall::Use* next = prefixArgs->GetNext(); prefixArgs->SetNext(argList); argList = prefixArgs; prefixArgs = next; } } return argList; } static bool TypeIs(var_types type1, var_types type2) { return type1 == type2; } // Check if type1 matches any type from the list. template <typename... T> static bool TypeIs(var_types type1, var_types type2, T... rest) { return TypeIs(type1, type2) || TypeIs(type1, rest...); } //------------------------------------------------------------------------ // impCheckImplicitArgumentCoercion: check that the node's type is compatible with // the signature's type using ECMA implicit argument coercion table. // // Arguments: // sigType - the type in the call signature; // nodeType - the node type. // // Return Value: // true if they are compatible, false otherwise. // // Notes: // - it is currently allowing byref->long passing, should be fixed in VM; // - it can't check long -> native int case on 64-bit platforms, // so the behavior is different depending on the target bitness. // bool Compiler::impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const { if (sigType == nodeType) { return true; } if (TypeIs(sigType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT)) { if (TypeIs(nodeType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT, TYP_I_IMPL)) { return true; } } else if (TypeIs(sigType, TYP_ULONG, TYP_LONG)) { if (TypeIs(nodeType, TYP_LONG)) { return true; } } else if (TypeIs(sigType, TYP_FLOAT, TYP_DOUBLE)) { if (TypeIs(nodeType, TYP_FLOAT, TYP_DOUBLE)) { return true; } } else if (TypeIs(sigType, TYP_BYREF)) { if (TypeIs(nodeType, TYP_I_IMPL)) { return true; } // This condition tolerates such IL: // ; V00 this ref this class-hnd // ldarg.0 // call(byref) if (TypeIs(nodeType, TYP_REF)) { return true; } } else if (varTypeIsStruct(sigType)) { if (varTypeIsStruct(nodeType)) { return true; } } // This condition should not be under `else` because `TYP_I_IMPL` // intersects with `TYP_LONG` or `TYP_INT`. if (TypeIs(sigType, TYP_I_IMPL, TYP_U_IMPL)) { // Note that it allows `ldc.i8 1; call(nint)` on 64-bit platforms, // but we can't distinguish `nint` from `long` there. if (TypeIs(nodeType, TYP_I_IMPL, TYP_U_IMPL, TYP_INT, TYP_UINT)) { return true; } // It tolerates IL that ECMA does not allow but that is commonly used. // Example: // V02 loc1 struct <RTL_OSVERSIONINFOEX, 32> // ldloca.s 0x2 // call(native int) if (TypeIs(nodeType, TYP_BYREF)) { return true; } } return false; } /***************************************************************************** * * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.) * The first "skipReverseCount" items are not reversed. 
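 *
 * Illustrative example: if impPopCallArgs produces the use list A -> B -> C, then with
 * skipReverseCount == 1 the result is A -> C -> B, and with skipReverseCount == 0 the
 * whole list is reversed to C -> B -> A.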
*/ GenTreeCall::Use* Compiler::impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount) { assert(skipReverseCount <= count); GenTreeCall::Use* list = impPopCallArgs(count, sig); // reverse the list if (list == nullptr || skipReverseCount == count) { return list; } GenTreeCall::Use* ptr = nullptr; // Initialized to the first node that needs to be reversed GenTreeCall::Use* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed if (skipReverseCount == 0) { ptr = list; } else { lastSkipNode = list; // Get to the first node that needs to be reversed for (unsigned i = 0; i < skipReverseCount - 1; i++) { lastSkipNode = lastSkipNode->GetNext(); } PREFIX_ASSUME(lastSkipNode != nullptr); ptr = lastSkipNode->GetNext(); } GenTreeCall::Use* reversedList = nullptr; do { GenTreeCall::Use* tmp = ptr->GetNext(); ptr->SetNext(reversedList); reversedList = ptr; ptr = tmp; } while (ptr != nullptr); if (skipReverseCount) { lastSkipNode->SetNext(reversedList); return list; } else { return reversedList; } } //------------------------------------------------------------------------ // impAssignStruct: Create a struct assignment // // Arguments: // dest - the destination of the assignment // src - the value to be assigned // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // ilOffset - il offset for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = nullptr */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = nullptr */ ) { assert(varTypeIsStruct(dest)); DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } while (dest->gtOper == GT_COMMA) { // Second thing is the struct. assert(varTypeIsStruct(dest->AsOp()->gtOp2)); // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree. if (pAfterStmt) { Statement* newStmt = gtNewStmt(dest->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(dest->AsOp()->gtOp1, curLevel, usedDI); // do the side effect } // set dest to the second thing dest = dest->AsOp()->gtOp2; } assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD || dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX); // Return a NOP if this is a self-assignment. if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR && src->AsLclVarCommon()->GetLclNum() == dest->AsLclVarCommon()->GetLclNum()) { return gtNewNothingNode(); } // TODO-1stClassStructs: Avoid creating an address if it is not needed, // or re-creating a Blk node if it is. GenTree* destAddr; if (dest->gtOper == GT_IND || dest->OperIsBlk()) { destAddr = dest->AsOp()->gtOp1; } else { destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest); } return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, usedDI, block)); } //------------------------------------------------------------------------ // impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'. 
// // Arguments: // destAddr - address of the destination of the assignment // src - source of the assignment // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // di - debug info for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* dest = nullptr; GenTreeFlags destFlags = GTF_EMPTY; DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } #ifdef DEBUG #ifdef FEATURE_HW_INTRINSICS if (src->OperIs(GT_HWINTRINSIC)) { const GenTreeHWIntrinsic* intrinsic = src->AsHWIntrinsic(); if (HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())) { assert(src->TypeGet() == TYP_STRUCT); } else { assert(varTypeIsSIMD(src)); } } else #endif // FEATURE_HW_INTRINSICS { assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR, GT_COMMA) || ((src->TypeGet() != TYP_STRUCT) && src->OperIsSIMD())); } #endif // DEBUG var_types asgType = src->TypeGet(); if (src->gtOper == GT_CALL) { GenTreeCall* srcCall = src->AsCall(); if (srcCall->TreatAsHasRetBufArg(this)) { // Case of call returning a struct via hidden retbuf arg CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_ARM) // Unmanaged instance methods on Windows or Unix X86 need the retbuf arg after the first (this) parameter if ((TargetOS::IsWindows || compUnixX86Abi()) && srcCall->IsUnmanaged()) { if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv())) { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the second-to-last node // so it will be pushed on to the stack after the user args but before the native this arg // as required by the native ABI. GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else if (srcCall->GetUnmanagedCallConv() == CorInfoCallConvExtension::Thiscall) { // For thiscall, the "this" parameter is not included in the argument list reversal, // so we need to put the return buffer as the last parameter. for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } else if (lastArg->GetNext() == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, lastArg); } else { assert(lastArg != nullptr && lastArg->GetNext() != nullptr); GenTreeCall::Use* secondLastArg = lastArg; lastArg = lastArg->GetNext(); for (; lastArg->GetNext() != nullptr; secondLastArg = lastArg, lastArg = lastArg->GetNext()) ; assert(secondLastArg->GetNext() != nullptr); gtInsertNewCallArgAfter(destAddr, secondLastArg); } #else GenTreeCall::Use* thisArg = gtInsertNewCallArgAfter(destAddr, srcCall->gtCallArgs); #endif } else { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the last node so it will be pushed on to the stack last // as required by the native ABI. 
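                    // Illustrative note (roughly): for an x86 cdecl call that returns a
                    // struct via a hidden buffer, the buffer address acts as the leftmost
                    // parameter; since arguments are pushed right-to-left, appending it as
                    // the last node of the already-reversed list makes it the final push.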
GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else { for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } #else // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); #endif } } else #endif // !defined(TARGET_ARM) { // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } // now returns void, not a struct src->gtType = TYP_VOID; // return the morphed call node return src; } else { // Case of call returning a struct in one or more registers. var_types returnType = (var_types)srcCall->gtReturnType; // First we try to change this to "LclVar/LclFld = call" // if ((destAddr->gtOper == GT_ADDR) && (destAddr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)) { // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD. // That is, the IR will be of the form lclVar = call for multi-reg return // GenTreeLclVar* lcl = destAddr->AsOp()->gtOp1->AsLclVar(); unsigned lclNum = lcl->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (src->AsCall()->HasMultiRegRetVal()) { // Mark the struct LclVar as used in a MultiReg return context // which currently makes it non promotable. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; } dest = lcl; #if defined(TARGET_ARM) // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case, // but that method has not been updadted to include ARM. impMarkLclDstNotPromotable(lclNum, src, structHnd); lcl->gtFlags |= GTF_DONT_CSE; #elif defined(UNIX_AMD64_ABI) // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs. assert(!src->AsCall()->IsVarargs() && "varargs not allowed for System V OSs."); // Make the struct non promotable. The eightbytes could contain multiple fields. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. // TODO-Cleanup: Why is this needed here? This seems that it will set this even for // non-multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; #endif } else // we don't have a GT_ADDR of a GT_LCL_VAR { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier. asgType = returnType; destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->gtOper == GT_RET_EXPR) { GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate->AsCall(); noway_assert(call->gtOper == GT_CALL); if (call->HasRetBufArg()) { // insert the return value buffer into the argument list as first byref parameter call->gtCallArgs = gtPrependNewCallArg(destAddr, call->gtCallArgs); // now returns void, not a struct src->gtType = TYP_VOID; call->gtType = TYP_VOID; // We already have appended the write to 'dest' GT_CALL's args // So now we just return an empty node (pruning the GT_RET_EXPR) return src; } else { // Case of inline method returning a struct in one or more registers. // We won't need a return buffer asgType = src->gtType; if ((destAddr->gtOper != GT_ADDR) || (destAddr->AsOp()->gtOp1->gtOper != GT_LCL_VAR)) { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier. 
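                // Illustrative note: GTF_IND_TGTANYWHERE means the store target may be on the
                // stack or the heap, so a GC-ref store through it must use the checked write
                // barrier rather than the faster heap-only one.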
destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->OperIsBlk()) { asgType = impNormStructType(structHnd); if (src->gtOper == GT_OBJ) { assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd); } } else if (src->gtOper == GT_INDEX) { asgType = impNormStructType(structHnd); assert(src->AsIndex()->gtStructElemClass == structHnd); } else if (src->gtOper == GT_MKREFANY) { // Since we are assigning the result of a GT_MKREFANY, // "destAddr" must point to a refany. GenTree* destAddrClone; destAddr = impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment")); assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0); assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF); fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr); GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()); GenTree* typeSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset)); // append the assign of the pointer value GenTree* asg = gtNewAssignNode(ptrSlot, src->AsOp()->gtOp1); if (pAfterStmt) { Statement* newStmt = gtNewStmt(asg, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(asg, curLevel, usedDI); } // return the assign of the type value, to be appended return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2); } else if (src->gtOper == GT_COMMA) { // The second thing is the struct or its address. assert(varTypeIsStruct(src->AsOp()->gtOp2) || src->AsOp()->gtOp2->gtType == TYP_BYREF); if (pAfterStmt) { // Insert op1 after '*pAfterStmt' Statement* newStmt = gtNewStmt(src->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else if (impLastStmt != nullptr) { // Do the side-effect as a separate statement. impAppendTree(src->AsOp()->gtOp1, curLevel, usedDI); } else { // In this case we have neither been given a statement to insert after, nor are we // in the importer where we can append the side effect. // Instead, we're going to sink the assignment below the COMMA. src->AsOp()->gtOp2 = impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); return src; } // Evaluate the second thing using recursion. return impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); } else if (src->IsLocal()) { asgType = src->TypeGet(); } else if (asgType == TYP_STRUCT) { // It should already have the appropriate type. assert(asgType == impNormStructType(structHnd)); } if ((dest == nullptr) && (destAddr->OperGet() == GT_ADDR)) { GenTree* destNode = destAddr->gtGetOp1(); // If the actual destination is a local, a GT_INDEX or a block node, or is a node that // will be morphed, don't insert an OBJ(ADDR) if it already has the right type. if (destNode->OperIs(GT_LCL_VAR, GT_INDEX) || destNode->OperIsBlk()) { var_types destType = destNode->TypeGet(); // If one or both types are TYP_STRUCT (one may not yet be normalized), they are compatible // iff their handles are the same. // Otherwise, they are compatible if their types are the same. bool typesAreCompatible = ((destType == TYP_STRUCT) || (asgType == TYP_STRUCT)) ? 
((gtGetStructHandleIfPresent(destNode) == structHnd) && varTypeIsStruct(asgType)) : (destType == asgType); if (typesAreCompatible) { dest = destNode; if (destType != TYP_STRUCT) { // Use a normalized type if available. We know from above that they're equivalent. asgType = destType; } } } } if (dest == nullptr) { if (asgType == TYP_STRUCT) { dest = gtNewObjNode(structHnd, destAddr); gtSetObjGcInfo(dest->AsObj()); // Although an obj as a call argument was always assumed to be a globRef // (which is itself overly conservative), that is not true of the operands // of a block assignment. dest->gtFlags &= ~GTF_GLOB_REF; dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF); } else { dest = gtNewOperNode(GT_IND, asgType, destAddr); } } if (dest->OperIs(GT_LCL_VAR) && (src->IsMultiRegNode() || (src->OperIs(GT_RET_EXPR) && src->AsRetExpr()->gtInlineCandidate->AsCall()->HasMultiRegRetVal()))) { if (lvaEnregMultiRegVars && varTypeIsStruct(dest)) { dest->AsLclVar()->SetMultiReg(); } if (src->OperIs(GT_CALL)) { lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true; } } dest->gtFlags |= destFlags; destFlags = dest->gtFlags; // return an assignment node, to be appended GenTree* asgNode = gtNewAssignNode(dest, src); gtBlockOpInit(asgNode, dest, src, false); // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs // of assignments. if ((destFlags & GTF_DONT_CSE) == 0) { dest->gtFlags &= ~(GTF_DONT_CSE); } return asgNode; } /***************************************************************************** Given a struct value, and the class handle for that structure, return the expression for the address for that structure value. willDeref - does the caller guarantee to dereference the pointer. */ GenTree* Compiler::impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref) { assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd)); var_types type = structVal->TypeGet(); genTreeOps oper = structVal->gtOper; if (oper == GT_OBJ && willDeref) { assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd); return (structVal->AsObj()->Addr()); } else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY || structVal->OperIsSimdOrHWintrinsic()) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The 'return value' is now the temp itself type = genActualType(lvaTable[tmpNum].TypeGet()); GenTree* temp = gtNewLclvNode(tmpNum, type); temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp); return temp; } else if (oper == GT_COMMA) { assert(structVal->AsOp()->gtOp2->gtType == type); // Second thing is the struct Statement* oldLastStmt = impLastStmt; structVal->AsOp()->gtOp2 = impGetStructAddr(structVal->AsOp()->gtOp2, structHnd, curLevel, willDeref); structVal->gtType = TYP_BYREF; if (oldLastStmt != impLastStmt) { // Some temp assignment statement was placed on the statement list // for Op2, but that would be out of order with op1, so we need to // spill op1 onto the statement list after whatever was last // before we recursed on Op2 (i.e. before whatever Op2 appended). Statement* beforeStmt; if (oldLastStmt == nullptr) { // The op1 stmt should be the first in the list. beforeStmt = impStmtList; } else { // Insert after the oldLastStmt before the first inserted for op2. 
beforeStmt = oldLastStmt->GetNextStmt(); } impInsertTreeBefore(structVal->AsOp()->gtOp1, impCurStmtDI, beforeStmt); structVal->AsOp()->gtOp1 = gtNewNothingNode(); } return (structVal); } return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } //------------------------------------------------------------------------ // impNormStructType: Normalize the type of a (known to be) struct class handle. // // Arguments: // structHnd - The class handle for the struct type of interest. // pSimdBaseJitType - (optional, default nullptr) - if non-null, and the struct is a SIMD // type, set to the SIMD base JIT type // // Return Value: // The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*). // It may also modify the compFloatingPointUsed flag if the type is a SIMD type. // // Notes: // Normalizing the type involves examining the struct type to determine if it should // be modified to one that is handled specially by the JIT, possibly being a candidate // for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known // call structSizeMightRepresentSIMDType to determine if this api needs to be called. var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* pSimdBaseJitType) { assert(structHnd != NO_CLASS_HANDLE); var_types structType = TYP_STRUCT; #ifdef FEATURE_SIMD if (supportSIMDTypes()) { const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd); // Don't bother if the struct contains GC references of byrefs, it can't be a SIMD type. if ((structFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) == 0) { unsigned originalSize = info.compCompHnd->getClassSize(structHnd); if (structSizeMightRepresentSIMDType(originalSize)) { unsigned int sizeBytes; CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structHnd, &sizeBytes); if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(sizeBytes == originalSize); structType = getSIMDTypeForSize(sizeBytes); if (pSimdBaseJitType != nullptr) { *pSimdBaseJitType = simdBaseJitType; } // Also indicate that we use floating point registers. compFloatingPointUsed = true; } } } } #endif // FEATURE_SIMD return structType; } //------------------------------------------------------------------------ // Compiler::impNormStructVal: Normalize a struct value // // Arguments: // structVal - the node we are going to normalize // structHnd - the class handle for the node // curLevel - the current stack level // forceNormalization - Force the creation of an OBJ node (default is false). // // Notes: // Given struct value 'structVal', make sure it is 'canonical', that is // it is either: // - a known struct type (non-TYP_STRUCT, e.g. TYP_SIMD8) // - an OBJ or a MKREFANY node, or // - a node (e.g. GT_INDEX) that will be morphed. // If the node is a CALL or RET_EXPR, a copy will be made to a new temp. // GenTree* Compiler::impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization /*=false*/) { assert(forceNormalization || varTypeIsStruct(structVal)); assert(structHnd != NO_CLASS_HANDLE); var_types structType = structVal->TypeGet(); bool makeTemp = false; if (structType == TYP_STRUCT) { structType = impNormStructType(structHnd); } bool alreadyNormalized = false; GenTreeLclVarCommon* structLcl = nullptr; genTreeOps oper = structVal->OperGet(); switch (oper) { // GT_RETURN and GT_MKREFANY don't capture the handle. 
case GT_RETURN: break; case GT_MKREFANY: alreadyNormalized = true; break; case GT_CALL: structVal->AsCall()->gtRetClsHnd = structHnd; makeTemp = true; break; case GT_RET_EXPR: structVal->AsRetExpr()->gtRetClsHnd = structHnd; makeTemp = true; break; case GT_ARGPLACE: structVal->AsArgPlace()->gtArgPlaceClsHnd = structHnd; break; case GT_INDEX: // This will be transformed to an OBJ later. alreadyNormalized = true; structVal->AsIndex()->gtStructElemClass = structHnd; structVal->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(structHnd); break; case GT_FIELD: // Wrap it in a GT_OBJ, if needed. structVal->gtType = structType; if ((structType == TYP_STRUCT) || forceNormalization) { structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } break; case GT_LCL_VAR: case GT_LCL_FLD: structLcl = structVal->AsLclVarCommon(); // Wrap it in a GT_OBJ. structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); FALLTHROUGH; case GT_OBJ: case GT_BLK: case GT_ASG: // These should already have the appropriate type. assert(structVal->gtType == structType); alreadyNormalized = true; break; case GT_IND: assert(structVal->gtType == structType); structVal = gtNewObjNode(structHnd, structVal->gtGetOp1()); alreadyNormalized = true; break; #ifdef FEATURE_SIMD case GT_SIMD: assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType)); break; #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: assert(structVal->gtType == structType); assert(varTypeIsSIMD(structVal) || HWIntrinsicInfo::IsMultiReg(structVal->AsHWIntrinsic()->GetHWIntrinsicId())); break; #endif case GT_COMMA: { // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node. GenTree* blockNode = structVal->AsOp()->gtOp2; assert(blockNode->gtType == structType); // Is this GT_COMMA(op1, GT_COMMA())? GenTree* parent = structVal; if (blockNode->OperGet() == GT_COMMA) { // Find the last node in the comma chain. do { assert(blockNode->gtType == structType); parent = blockNode; blockNode = blockNode->AsOp()->gtOp2; } while (blockNode->OperGet() == GT_COMMA); } if (blockNode->OperGet() == GT_FIELD) { // If we have a GT_FIELD then wrap it in a GT_OBJ. blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode)); } #ifdef FEATURE_SIMD if (blockNode->OperIsSimdOrHWintrinsic()) { parent->AsOp()->gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization); alreadyNormalized = true; } else #endif { noway_assert(blockNode->OperIsBlk()); // Sink the GT_COMMA below the blockNode addr. // That is GT_COMMA(op1, op2=blockNode) is tranformed into // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)). // // In case of a chained GT_COMMA case, we sink the last // GT_COMMA below the blockNode addr. 
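                // Illustrative shape: COMMA(sideEffect, OBJ(addr)) becomes
                // OBJ(COMMA(sideEffect, addr)), leaving the struct-typed OBJ at the root
                // while the side effect is evaluated as part of computing the address.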
GenTree* blockNodeAddr = blockNode->AsOp()->gtOp1; assert(blockNodeAddr->gtType == TYP_BYREF); GenTree* commaNode = parent; commaNode->gtType = TYP_BYREF; commaNode->AsOp()->gtOp2 = blockNodeAddr; blockNode->AsOp()->gtOp1 = commaNode; if (parent == structVal) { structVal = blockNode; } alreadyNormalized = true; } } break; default: noway_assert(!"Unexpected node in impNormStructVal()"); break; } structVal->gtType = structType; if (!alreadyNormalized || forceNormalization) { if (makeTemp) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The structVal is now the temp itself structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon(); structVal = structLcl; } if ((forceNormalization || (structType == TYP_STRUCT)) && !structVal->OperIsBlk()) { // Wrap it in a GT_OBJ structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } } if (structLcl != nullptr) { // A OBJ on a ADDR(LCL_VAR) can never raise an exception // so we don't set GTF_EXCEPT here. if (!lvaIsImplicitByRefLocal(structLcl->GetLclNum())) { structVal->gtFlags &= ~GTF_GLOB_REF; } } else if (structVal->OperIsBlk()) { // In general a OBJ is an indirection and could raise an exception. structVal->gtFlags |= GTF_EXCEPT; } return structVal; } /******************************************************************************/ // Given a type token, generate code that will evaluate to the correct // handle representation of that token (type handle, field handle, or method handle) // // For most cases, the handle is determined at compile-time, and the code // generated is simply an embedded handle. // // Run-time lookup is required if the enclosing method is shared between instantiations // and the token refers to formal type parameters whose instantiation is not known // at compile-time. // GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup /* = NULL */, bool mustRestoreHandle /* = false */, bool importParent /* = false */) { assert(!fgGlobalMorph); CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo); if (pRuntimeLookup) { *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup; } if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup) { switch (embedInfo.handleType) { case CORINFO_HANDLETYPE_CLASS: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle); break; case CORINFO_HANDLETYPE_METHOD: info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle); break; case CORINFO_HANDLETYPE_FIELD: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun( info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle)); break; default: break; } } // Generate the full lookup tree. May be null if we're abandoning an inline attempt. GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token), embedInfo.compileTimeHandle); // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node. 
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG size_t handleToTrack; if (handleFlags == GTF_ICON_TOKEN_HDL) { handleToTrack = 0; } else { handleToTrack = (size_t)compileTimeHandle; } if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = handleToTrack; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = handleToTrack; } #endif return addr; } if (pLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case assert(compIsForInlining()); compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP); return nullptr; } // Need to use dictionary-based access which depends on the typeContext // which is only available at runtime, not at compile-time. return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle); } #ifdef FEATURE_READYTORUN GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE); if (pLookup->accessType == IAT_VALUE) { handle = pLookup->handle; } else if (pLookup->accessType == IAT_PVALUE) { pIndirection = pLookup->addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG assert((handleFlags == GTF_ICON_CLASS_HDL) || (handleFlags == GTF_ICON_METHOD_HDL)); if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } #endif // DEBUG return addr; } //------------------------------------------------------------------------ // impIsCastHelperEligibleForClassProbe: Checks whether a tree is a cast helper eligible to // to be profiled and then optimized with PGO data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper eligible to be profiled // bool Compiler::impIsCastHelperEligibleForClassProbe(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } 
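// Illustrative note on impIsCastHelperEligibleForClassProbe above: the four helpers it accepts
// are the ones the importer typically expands for the "isinst" and "castclass" IL opcodes on
// classes and interfaces; array and "any"-style cast helpers are not considered for probes.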
//------------------------------------------------------------------------ // impIsCastHelperMayHaveProfileData: Checks whether a tree is a cast helper that might // have profile data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper with potential profile data // bool Compiler::impIsCastHelperMayHaveProfileData(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } GenTreeCall* Compiler::impReadyToRunHelperToTree( CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args /* = nullptr */, CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */) { CORINFO_CONST_LOOKUP lookup; if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup)) { return nullptr; } GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args); op1->setEntryPoint(lookup); return op1; } #endif GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* op1 = nullptr; switch (pCallInfo->kind) { case CORINFO_CALL: op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod); #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1->AsFptrVal()->gtEntryPoint = pCallInfo->codePointerLookup.constLookup; } #endif break; case CORINFO_CALL_CODE_POINTER: op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod); break; default: noway_assert(!"unknown call kind"); break; } return op1; } //------------------------------------------------------------------------ // getRuntimeContextTree: find pointer to context for runtime lookup. // // Arguments: // kind - lookup kind. // // Return Value: // Return GenTree pointer to generic shared context. // // Notes: // Reports about generic context using. GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind) { GenTree* ctxTree = nullptr; // Collectible types requires that for shared generic code, if we use the generic context parameter // that we report it. (This is a conservative approach, we could detect some cases particularly when the // context parameter is this that we don't need the eager reporting logic.) lvaGenericsContextInUse = true; Compiler* pRoot = impInlineRoot(); if (kind == CORINFO_LOOKUP_THISOBJ) { // this Object ctxTree = gtNewLclvNode(pRoot->info.compThisArg, TYP_REF); ctxTree->gtFlags |= GTF_VAR_CONTEXT; // context is the method table pointer of the this object ctxTree = gtNewMethodTableLookup(ctxTree); } else { assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM); // Exact method descriptor as passed in ctxTree = gtNewLclvNode(pRoot->info.compTypeCtxtArg, TYP_I_IMPL); ctxTree->gtFlags |= GTF_VAR_CONTEXT; } return ctxTree; } /*****************************************************************************/ /* Import a dictionary lookup to access a handle in code shared between generic instantiations. The lookup depends on the typeContext which is only available at runtime, and not at compile-time. 
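   Illustrative example: a typeof(T) (IL "ldtoken !T") inside code shared over __Canon
   instantiations needs the caller's exact handle for T at run time. With
   indirections == 2 and no null test, the lookup is roughly
   handle = *(*(ctx + offsets[0]) + offsets[1]), where ctx comes from
   getRuntimeContextTree; with testForNull, a null result falls back to the helper
   (case 2b below).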
pLookup->token1 and pLookup->token2 specify the handle that is needed. The cases are: 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the instantiation-specific handle, and the tokens to lookup the handle. 2. pLookup->indirections != CORINFO_USEHELPER : 2a. pLookup->testForNull == false : Dereference the instantiation-specific handle to get the handle. 2b. pLookup->testForNull == true : Dereference the instantiation-specific handle. If it is non-NULL, it is the handle required. Else, call a helper to lookup the handle. */ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle) { GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind); CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup; // It's available only via the run-time helper function if (pRuntimeLookup->indirections == CORINFO_USEHELPER) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pLookup->lookupKind); } #endif return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, ctxTree, compileTimeHandle); } // Slot pointer GenTree* slotPtrTree = ctxTree; if (pRuntimeLookup->testForNull) { slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup slot")); } GenTree* indOffTree = nullptr; GenTree* lastIndOfTree = nullptr; // Applied repeated indirections for (WORD i = 0; i < pRuntimeLookup->indirections; i++) { if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } // The last indirection could be subject to a size check (dynamic dictionary expansion) bool isLastIndirectionWithSizeCheck = ((i == pRuntimeLookup->indirections - 1) && (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)); if (i != 0) { slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!isLastIndirectionWithSizeCheck) { slotPtrTree->gtFlags |= GTF_IND_INVARIANT; } } if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree); } if (pRuntimeLookup->offsets[i] != 0) { if (isLastIndirectionWithSizeCheck) { lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL)); } } // No null test required if (!pRuntimeLookup->testForNull) { if (pRuntimeLookup->indirections == 0) { return slotPtrTree; } slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!pRuntimeLookup->testForFixup) { return slotPtrTree; } impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0")); unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test")); impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtDI); GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); // downcast the pointer to a TYP_INT on 64-bit targets slot = impImplicitIorI4Cast(slot, TYP_INT); // 
Use a GT_AND to check for the lowest bit and indirect if it is set GenTree* test = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1)); GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0)); // slot = GT_IND(slot - 1) slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* add = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL)); GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add); indir->gtFlags |= GTF_IND_NONFAULTING; indir->gtFlags |= GTF_IND_INVARIANT; slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* asg = gtNewAssignNode(slot, indir); GenTreeColon* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg); GenTreeQmark* qmark = gtNewQmarkNode(TYP_VOID, relop, colon); impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); return gtNewLclvNode(slotLclNum, TYP_I_IMPL); } assert(pRuntimeLookup->indirections != 0); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1")); // Extract the handle GenTree* handleForNullCheck = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); handleForNullCheck->gtFlags |= GTF_IND_NONFAULTING; // Call the helper // - Setup argNode with the pointer to the signature returned by the lookup GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle); GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode); GenTreeCall* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs); // Check for null and possibly call helper GenTree* nullCheck = gtNewOperNode(GT_NE, TYP_INT, handleForNullCheck, gtNewIconNode(0, TYP_I_IMPL)); GenTree* handleForResult = gtCloneExpr(handleForNullCheck); GenTree* result = nullptr; if (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK) { // Dynamic dictionary expansion support assert((lastIndOfTree != nullptr) && (pRuntimeLookup->indirections > 0)); // sizeValue = dictionary[pRuntimeLookup->sizeOffset] GenTreeIntCon* sizeOffset = gtNewIconNode(pRuntimeLookup->sizeOffset, TYP_I_IMPL); GenTree* sizeValueOffset = gtNewOperNode(GT_ADD, TYP_I_IMPL, lastIndOfTree, sizeOffset); GenTree* sizeValue = gtNewOperNode(GT_IND, TYP_I_IMPL, sizeValueOffset); sizeValue->gtFlags |= GTF_IND_NONFAULTING; // sizeCheck fails if sizeValue < pRuntimeLookup->offsets[i] GenTree* offsetValue = gtNewIconNode(pRuntimeLookup->offsets[pRuntimeLookup->indirections - 1], TYP_I_IMPL); GenTree* sizeCheck = gtNewOperNode(GT_LE, TYP_INT, sizeValue, offsetValue); // revert null check condition. nullCheck->ChangeOperUnchecked(GT_EQ); // ((sizeCheck fails || nullCheck fails))) ? (helperCall : handle). // Add checks and the handle as call arguments, indirect call transformer will handle this. helperCall->gtCallArgs = gtPrependNewCallArg(handleForResult, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(sizeCheck, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(nullCheck, helperCall->gtCallArgs); result = helperCall; addExpRuntimeLookupCandidate(helperCall); } else { GenTreeColon* colonNullCheck = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, handleForResult, helperCall); result = gtNewQmarkNode(TYP_I_IMPL, nullCheck, colonNullCheck); } unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree")); impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE); return gtNewLclvNode(tmp, TYP_I_IMPL); } /****************************************************************************** * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp. 
* If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum, * else, grab a new temp. * For structs (which can be pushed on the stack using obj, etc), * special handling is needed */ struct RecursiveGuard { public: RecursiveGuard() { m_pAddress = nullptr; } ~RecursiveGuard() { if (m_pAddress) { *m_pAddress = false; } } void Init(bool* pAddress, bool bInitialize) { assert(pAddress && *pAddress == false && "Recursive guard violation"); m_pAddress = pAddress; if (bInitialize) { *m_pAddress = true; } } protected: bool* m_pAddress; }; bool Compiler::impSpillStackEntry(unsigned level, unsigned tnum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ) { #ifdef DEBUG RecursiveGuard guard; guard.Init(&impNestedStackSpill, bAssertOnRecursion); #endif GenTree* tree = verCurrentState.esStack[level].val; /* Allocate a temp if we haven't been asked to use a particular one */ if (tnum != BAD_VAR_NUM && (tnum >= lvaCount)) { return false; } bool isNewTemp = false; if (tnum == BAD_VAR_NUM) { tnum = lvaGrabTemp(true DEBUGARG(reason)); isNewTemp = true; } /* Assign the spilled entry to the temp */ impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level); // If temp is newly introduced and a ref type, grab what type info we can. if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF)) { assert(lvaTable[tnum].lvSingleDef == 0); lvaTable[tnum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tnum); CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle(); lvaSetClass(tnum, tree, stkHnd); // If we're assigning a GT_RET_EXPR, note the temp over on the call, // so the inliner can use it in case it needs a return spill temp. if (tree->OperGet() == GT_RET_EXPR) { JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum); GenTree* call = tree->AsRetExpr()->gtInlineCandidate; InlineCandidateInfo* ici = call->AsCall()->gtInlineCandidateInfo; ici->preexistingSpillTemp = tnum; } } // The tree type may be modified by impAssignTempGen, so use the type of the lclVar. var_types type = genActualType(lvaTable[tnum].TypeGet()); GenTree* temp = gtNewLclvNode(tnum, type); verCurrentState.esStack[level].val = temp; return true; } /***************************************************************************** * * Ensure that the stack has only spilled values */ void Compiler::impSpillStackEnsure(bool spillLeaves) { assert(!spillLeaves || opts.compDbgCode); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (!spillLeaves && tree->OperIsLeaf()) { continue; } // Temps introduced by the importer itself don't need to be spilled bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->AsLclVarCommon()->GetLclNum() >= info.compLocalsCount); if (isTempLcl) { continue; } impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure")); } } void Compiler::impSpillEvalStack() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack")); } } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and append the assignments to the statement list. * On return the stack is guaranteed to be empty. 
*/ inline void Compiler::impEvalSideEffects() { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects")); verCurrentState.esStackDepth = 0; } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and replace them on the stack with refs to their temps. * [0..chkLevel) is the portion of the stack which will be checked and spilled. */ inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)) { assert(chkLevel != (unsigned)CHECK_SPILL_NONE); /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */ impSpillSpecialSideEff(); if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } assert(chkLevel <= verCurrentState.esStackDepth); GenTreeFlags spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT; for (unsigned i = 0; i < chkLevel; i++) { GenTree* tree = verCurrentState.esStack[i].val; if ((tree->gtFlags & spillFlags) != 0 || (spillGlobEffects && // Only consider the following when spillGlobEffects == true !impIsAddressInLocal(tree) && // No need to spill the GT_ADDR node on a local. gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or // lvAddrTaken flag. { impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason)); } } } /***************************************************************************** * * If the stack contains any trees with special side effects in them, assign * those trees to temps and replace them on the stack with refs to their temps. */ inline void Compiler::impSpillSpecialSideEff() { // Only exception objects need to be carefully handled if (!compCurBB->bbCatchTyp) { return; } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; // Make sure if we have an exception object in the sub tree we spill ourselves. if (gtHasCatchArg(tree)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff")); } } } /***************************************************************************** * * Spill all stack references to value classes (TYP_STRUCT nodes) */ void Compiler::impSpillValueClasses() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT) { // Tree walk was aborted, which means that we found a // value class on the stack. Need to spill that // stack entry. impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses")); } } } /***************************************************************************** * * Callback that checks if a tree node is TYP_STRUCT */ Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data) { fgWalkResult walkResult = WALK_CONTINUE; if ((*pTree)->gtType == TYP_STRUCT) { // Abort the walk and indicate that we found a value class walkResult = WALK_ABORT; } return walkResult; } /***************************************************************************** * * If the stack contains any trees with references to local #lclNum, assign * those trees to temps and replace their place on the stack with refs to * their temps. 
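 * (This is typically done just before a store to lclNum, so that stack entries observe the
 *  local's pre-store value.)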
*/ void Compiler::impSpillLclRefs(ssize_t lclNum) { /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */ impSpillSpecialSideEff(); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; /* If the tree may throw an exception, and the block has a handler, then we need to spill assignments to the local if the local is live on entry to the handler. Just spill 'em all without considering the liveness */ bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT)); /* Skip the tree if it doesn't have an affected reference, unless xcptnCaught */ if (xcptnCaught || gtHasRef(tree, lclNum)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs")); } } } /***************************************************************************** * * Push catch arg onto the stack. * If there are jumps to the beginning of the handler, insert basic block * and spill catch arg to a temp. Update the handler block if necessary. * * Returns the basic block of the actual handler. */ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter) { // Do not inject the basic block twice on reimport. This should be // hit only under JIT stress. See if the block is the one we injected. // Note that EH canonicalization can inject internal blocks here. We might // be able to re-use such a block (but we don't, right now). if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) == (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) { Statement* stmt = hndBlk->firstStmt(); if (stmt != nullptr) { GenTree* tree = stmt->GetRootNode(); assert(tree != nullptr); if ((tree->gtOper == GT_ASG) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && (tree->AsOp()->gtOp2->gtOper == GT_CATCH_ARG)) { tree = gtNewLclvNode(tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(), TYP_REF); impPushOnStack(tree, typeInfo(TI_REF, clsHnd)); return hndBlk->bbNext; } } // If we get here, it must have been some other kind of internal block. It's possible that // someone prepended something to our injected block, but that's unlikely. } /* Push the exception address value on the stack */ GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF); /* Mark the node as having a side-effect - i.e. cannot be * moved around since it is tied to a fixed location (EAX) */ arg->gtFlags |= GTF_ORDER_SIDEEFF; #if defined(JIT32_GCENCODER) const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5); #else const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5); #endif // defined(JIT32_GCENCODER) /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */ if (hndBlk->bbRefs > 1 || forceInsertNewBlock) { if (hndBlk->bbRefs == 1) { hndBlk->bbRefs++; } /* Create extra basic block for the spill */ BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true); newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE; newBlk->inheritWeight(hndBlk); newBlk->bbCodeOffs = hndBlk->bbCodeOffs; /* Account for the new link we are about to create */ hndBlk->bbRefs++; // Spill into a temp. 
unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg")); lvaTable[tempNum].lvType = TYP_REF; GenTree* argAsg = gtNewTempAssign(tempNum, arg); arg = gtNewLclvNode(tempNum, TYP_REF); hndBlk->bbStkTempsIn = tempNum; Statement* argStmt; if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { // Report the debug info. impImportBlockCode won't treat the actual handler as exception block and thus // won't do it for us. // TODO-DEBUGINFO: Previous code always set stack as non-empty // here. Can we not just use impCurStmtOffsSet? Are we out of sync // here with the stack? impCurStmtDI = DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, false, false)); argStmt = gtNewStmt(argAsg, impCurStmtDI); } else { argStmt = gtNewStmt(argAsg); } fgInsertStmtAtEnd(newBlk, argStmt); } impPushOnStack(arg, typeInfo(TI_REF, clsHnd)); return hndBlk; } /***************************************************************************** * * Given a tree, clone it. *pClone is set to the cloned tree. * Returns the original tree if the cloning was easy, * else returns the temp to which the tree had to be spilled to. * If the tree has side-effects, it will be spilled to a temp. */ GenTree* Compiler::impCloneExpr(GenTree* tree, GenTree** pClone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)) { if (!(tree->gtFlags & GTF_GLOB_EFFECT)) { GenTree* clone = gtClone(tree, true); if (clone) { *pClone = clone; return tree; } } /* Store the operand in a temp and return the temp */ unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which // return a struct type. It also may modify the struct type to a more // specialized type (e.g. a SIMD type). So we will get the type from // the lclVar AFTER calling impAssignTempGen(). impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtDI); var_types type = genActualType(lvaTable[temp].TypeGet()); *pClone = gtNewLclvNode(temp, type); return gtNewLclvNode(temp, type); } //------------------------------------------------------------------------ // impCreateDIWithCurrentStackInfo: Create a DebugInfo instance with the // specified IL offset and 'is call' bit, using the current stack to determine // whether to set the 'stack empty' bit. // // Arguments: // offs - the IL offset for the DebugInfo // isCall - whether the created DebugInfo should have the IsCall bit set // // Return Value: // The DebugInfo instance. // DebugInfo Compiler::impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall) { assert(offs != BAD_IL_OFFSET); bool isStackEmpty = verCurrentState.esStackDepth <= 0; return DebugInfo(compInlineContext, ILLocation(offs, isStackEmpty, isCall)); } //------------------------------------------------------------------------ // impCurStmtOffsSet: Set the "current debug info" to attach to statements that // we are generating next. // // Arguments: // offs - the IL offset // // Remarks: // This function will be called in the main IL processing loop when it is // determined that we have reached a location in the IL stream for which we // want to report debug information. This is the main way we determine which // statements to report debug info for to the EE: for other statements, they // will have no debug information attached. 
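// Trees appended via impAppendTree pick up impCurStmtDI until it is set again.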
// inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs) { if (offs == BAD_IL_OFFSET) { impCurStmtDI = DebugInfo(compInlineContext, ILLocation()); } else { impCurStmtDI = impCreateDIWithCurrentStackInfo(offs, false); } } //------------------------------------------------------------------------ // impCanSpillNow: check is it possible to spill all values from eeStack to local variables. // // Arguments: // prevOpcode - last importer opcode // // Return Value: // true if it is legal, false if it could be a sequence that we do not want to divide. bool Compiler::impCanSpillNow(OPCODE prevOpcode) { // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence. // Avoid breaking up to guarantee that impInitializeArrayIntrinsic can succeed. return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ); } /***************************************************************************** * * Remember the instr offset for the statements * * When we do impAppendTree(tree), we can't set stmt->SetLastILOffset(impCurOpcOffs), * if the append was done because of a partial stack spill, * as some of the trees corresponding to code up to impCurOpcOffs might * still be sitting on the stack. * So we delay calling of SetLastILOffset() until impNoteLastILoffs(). * This should be called when an opcode finally/explicitly causes * impAppendTree(tree) to be called (as opposed to being called because of * a spill caused by the opcode) */ #ifdef DEBUG void Compiler::impNoteLastILoffs() { if (impLastILoffsStmt == nullptr) { // We should have added a statement for the current basic block // Is this assert correct ? assert(impLastStmt); impLastStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); } else { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } } #endif // DEBUG /***************************************************************************** * We don't create any GenTree (excluding spills) for a branch. * For debugging info, we need a placeholder so that we can note * the IL offset in gtStmt.gtStmtOffs. So append an empty statement. */ void Compiler::impNoteBranchOffs() { if (opts.compDbgCode) { impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } } /***************************************************************************** * Locate the next stmt boundary for which we need to record info. * We will have to spill the stack at such boundaries if it is not * already empty. * Returns the next stmt boundary (after the start of the block) */ unsigned Compiler::impInitBlockLineInfo() { /* Assume the block does not correspond with any IL offset. This prevents us from reporting extra offsets. Extra mappings can cause confusing stepping, especially if the extra mapping is a jump-target, and the debugger does not ignore extra mappings, but instead rewinds to the nearest known offset */ impCurStmtOffsSet(BAD_IL_OFFSET); IL_OFFSET blockOffs = compCurBB->bbCodeOffs; if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES)) { impCurStmtOffsSet(blockOffs); } /* Always report IL offset 0 or some tests get confused. 
Probably a good idea anyways */ if (blockOffs == 0) { impCurStmtOffsSet(blockOffs); } if (!info.compStmtOffsetsCount) { return ~0; } /* Find the lowest explicit stmt boundary within the block */ /* Start looking at an entry that is based on our instr offset */ unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize; if (index >= info.compStmtOffsetsCount) { index = info.compStmtOffsetsCount - 1; } /* If we've guessed too far, back up */ while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs) { index--; } /* If we guessed short, advance ahead */ while (info.compStmtOffsets[index] < blockOffs) { index++; if (index == info.compStmtOffsetsCount) { return info.compStmtOffsetsCount; } } assert(index < info.compStmtOffsetsCount); if (info.compStmtOffsets[index] == blockOffs) { /* There is an explicit boundary for the start of this basic block. So we will start with bbCodeOffs. Else we will wait until we get to the next explicit boundary */ impCurStmtOffsSet(blockOffs); index++; } return index; } /*****************************************************************************/ bool Compiler::impOpcodeIsCallOpcode(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: return true; default: return false; } } /*****************************************************************************/ static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: case CEE_JMP: case CEE_NEWOBJ: case CEE_NEWARR: return true; default: return false; } } /*****************************************************************************/ // One might think it is worth caching these values, but results indicate // that it isn't. // In addition, caching them causes SuperPMI to be unable to completely // encapsulate an individual method context. CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass() { CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr); return refAnyClass; } CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass() { CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE); assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr); return typeHandleClass; } CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle() { CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE); assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr); return argIteratorClass; } CORINFO_CLASS_HANDLE Compiler::impGetStringClass() { CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING); assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr); return stringClass; } CORINFO_CLASS_HANDLE Compiler::impGetObjectClass() { CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT); assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr); return objectClass; } /***************************************************************************** * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we * set its type to TYP_BYREF when we create it. 
We know if it can be * changed to TYP_I_IMPL only at the point where we use it */ /* static */ void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2) { if (tree1->IsLocalAddrExpr() != nullptr) { tree1->gtType = TYP_I_IMPL; } if (tree2 && (tree2->IsLocalAddrExpr() != nullptr)) { tree2->gtType = TYP_I_IMPL; } } /***************************************************************************** * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want * to make that an explicit cast in our trees, so any implicit casts that * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are * turned into explicit casts here. * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0) */ GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp) { var_types currType = genActualType(tree->gtType); var_types wantedType = genActualType(dstTyp); if (wantedType != currType) { // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp)) { if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->AsIntCon()->gtIconVal == 0))) { tree->gtType = TYP_I_IMPL; } } #ifdef TARGET_64BIT else if (varTypeIsI(wantedType) && (currType == TYP_INT)) { // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } else if ((wantedType == TYP_INT) && varTypeIsI(currType)) { // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT); } #endif // TARGET_64BIT } return tree; } /***************************************************************************** * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases, * but we want to make that an explicit cast in our trees, so any implicit casts * that exist in the IL are turned into explicit casts here. */ GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp) { if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType)) { tree = gtNewCastNode(dstTyp, tree, false, dstTyp); } return tree; } //------------------------------------------------------------------------ // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray // with a GT_COPYBLK node. // // Arguments: // sig - The InitializeArray signature. // // Return Value: // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or // nullptr otherwise. // // Notes: // The function recognizes the following IL pattern: // ldc <length> or a list of ldc <lower bound>/<length> // newarr or newobj // dup // ldtoken <field handle> // call InitializeArray // The lower bounds need not be constant except when the array rank is 1. // The function recognizes all kinds of arrays thus enabling a small runtime // such as CoreRT to skip providing an implementation for InitializeArray. GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 2); GenTree* fieldTokenNode = impStackTop(0).val; GenTree* arrayLocalNode = impStackTop(1).val; // // Verify that the field token is known and valid. Note that It's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
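    // The expected shape is a CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD helper call wrapping the
    // ldtoken constant; anything else (e.g. a token obtained via reflection) bails out of the optimization.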
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } // // We need to get the number of elements in the array and the size of each element. // We verify that the newarr statement is exactly what we expect it to be. // If it's not then we just return NULL and we don't optimize this call // // It is possible the we don't have any statements in the block yet. if (impLastStmt == nullptr) { return nullptr; } // // We start by looking at the last statement, making sure it's an assignment, and // that the target of the assignment is the array passed to InitializeArray. // GenTree* arrayAssignment = impLastStmt->GetRootNode(); if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->AsOp()->gtOp1->gtOper != GT_LCL_VAR) || (arrayLocalNode->gtOper != GT_LCL_VAR) || (arrayAssignment->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != arrayLocalNode->AsLclVarCommon()->GetLclNum())) { return nullptr; } // // Make sure that the object being assigned is a helper call. // GenTree* newArrayCall = arrayAssignment->AsOp()->gtOp2; if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->AsCall()->gtCallType != CT_HELPER)) { return nullptr; } // // Verify that it is one of the new array helpers. // bool isMDArray = false; if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8) #ifdef FEATURE_READYTORUN && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1) #endif ) { if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR)) { return nullptr; } isMDArray = true; } CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->AsCall()->compileTimeHelperArgumentHandle; // // Make sure we found a compile time handle to the array // if (!arrayClsHnd) { return nullptr; } unsigned rank = 0; S_UINT32 numElements; if (isMDArray) { rank = info.compCompHnd->getArrayRank(arrayClsHnd); if (rank == 0) { return nullptr; } GenTreeCall::Use* tokenArg = newArrayCall->AsCall()->gtCallArgs; assert(tokenArg != nullptr); GenTreeCall::Use* numArgsArg = tokenArg->GetNext(); assert(numArgsArg != nullptr); GenTreeCall::Use* argsArg = numArgsArg->GetNext(); assert(argsArg != nullptr); // // The number of arguments should be a constant between 1 and 64. The rank can't be 0 // so at least one length must be present and the rank can't exceed 32 so there can // be at most 64 arguments - 32 lengths and 32 lower bounds. 
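        // e.g. a rank 2 array with explicit lower bounds passes 4 arguments (2 lower bounds and 2 lengths).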
// if ((!numArgsArg->GetNode()->IsCnsIntOrI()) || (numArgsArg->GetNode()->AsIntCon()->IconValue() < 1) || (numArgsArg->GetNode()->AsIntCon()->IconValue() > 64)) { return nullptr; } unsigned numArgs = static_cast<unsigned>(numArgsArg->GetNode()->AsIntCon()->IconValue()); bool lowerBoundsSpecified; if (numArgs == rank * 2) { lowerBoundsSpecified = true; } else if (numArgs == rank) { lowerBoundsSpecified = false; // // If the rank is 1 and a lower bound isn't specified then the runtime creates // a SDArray. Note that even if a lower bound is specified it can be 0 and then // we get a SDArray as well, see the for loop below. // if (rank == 1) { isMDArray = false; } } else { return nullptr; } // // The rank is known to be at least 1 so we can start with numElements being 1 // to avoid the need to special case the first dimension. // numElements = S_UINT32(1); struct Match { static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) && (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) && (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs); } static bool IsComma(GenTree* tree) { return (tree != nullptr) && (tree->OperGet() == GT_COMMA); } }; unsigned argIndex = 0; GenTree* comma; for (comma = argsArg->GetNode(); Match::IsComma(comma); comma = comma->gtGetOp2()) { if (lowerBoundsSpecified) { // // In general lower bounds can be ignored because they're not needed to // calculate the total number of elements. But for single dimensional arrays // we need to know if the lower bound is 0 because in this case the runtime // creates a SDArray and this affects the way the array data offset is calculated. // if (rank == 1) { GenTree* lowerBoundAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2(); if (lowerBoundNode->IsIntegralConst(0)) { isMDArray = false; } } comma = comma->gtGetOp2(); argIndex++; } GenTree* lengthNodeAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lengthNode = lengthNodeAssign->gtGetOp2(); if (!lengthNode->IsCnsIntOrI()) { return nullptr; } numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue()); argIndex++; } assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs)); if (argIndex != numArgs) { return nullptr; } } else { // // Make sure there are exactly two arguments: the array class and // the number of elements. 
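        // This is the single-dimension (newarr) path: the helper call carries the array class and
        // the element count.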
// GenTree* arrayLengthNode; GenTreeCall::Use* args = newArrayCall->AsCall()->gtCallArgs; #ifdef FEATURE_READYTORUN if (newArrayCall->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)) { // Array length is 1st argument for readytorun helper arrayLengthNode = args->GetNode(); } else #endif { // Array length is 2nd argument for regular helper arrayLengthNode = args->GetNext()->GetNode(); } // // This optimization is only valid for a constant array size. // if (arrayLengthNode->gtOper != GT_CNS_INT) { return nullptr; } numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->gtIconVal); if (!info.compCompHnd->isSDArray(arrayClsHnd)) { return nullptr; } } CORINFO_CLASS_HANDLE elemClsHnd; var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd)); // // Note that genTypeSize will return zero for non primitive types, which is exactly // what we want (size will then be 0, and we will catch this in the conditional below). // Note that we don't expect this to fail for valid binaries, so we assert in the // non-verification case (the verification case should not assert but rather correctly // handle bad binaries). This assert is not guarding any specific invariant, but rather // saying that we don't expect this to happen, and if it is hit, we need to investigate // why. // S_UINT32 elemSize(genTypeSize(elementType)); S_UINT32 size = elemSize * S_UINT32(numElements); if (size.IsOverflow()) { return nullptr; } if ((size.Value() == 0) || (varTypeIsGC(elementType))) { return nullptr; } void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value()); if (!initData) { return nullptr; } // // At this point we are ready to commit to implementing the InitializeArray // intrinsic using a struct assignment. Pop the arguments from the stack and // return the struct assignment node. // impPopStack(); impPopStack(); const unsigned blkSize = size.Value(); unsigned dataOffset; if (isMDArray) { dataOffset = eeGetMDArrayDataOffset(rank); } else { dataOffset = eeGetArrayDataOffset(); } GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL)); GenTree* dst = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, dstAddr, typGetBlkLayout(blkSize)); GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_CONST_PTR, true); #ifdef DEBUG src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_IntializeArrayIntrinsics; #endif return gtNewBlkOpNode(dst, // dst src, // src false, // volatile true); // copyBlock } GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 1); assert(sig->sigInst.methInstCount == 1); GenTree* fieldTokenNode = impStackTop(0).val; // // Verify that the field token is known and valid. Note that it's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
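    // Same pattern as impInitializeArrayIntrinsic above: the token must arrive through the
    // CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD helper.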
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } CORINFO_CLASS_HANDLE fieldOwnerHnd = info.compCompHnd->getFieldClass(fieldToken); CORINFO_CLASS_HANDLE fieldClsHnd; var_types fieldElementType = JITtype2varType(info.compCompHnd->getFieldType(fieldToken, &fieldClsHnd, fieldOwnerHnd)); unsigned totalFieldSize; // Most static initialization data fields are of some structure, but it is possible for them to be of various // primitive types as well if (fieldElementType == var_types::TYP_STRUCT) { totalFieldSize = info.compCompHnd->getClassSize(fieldClsHnd); } else { totalFieldSize = genTypeSize(fieldElementType); } // Limit to primitive or enum type - see ArrayNative::GetSpanDataFrom() CORINFO_CLASS_HANDLE targetElemHnd = sig->sigInst.methInst[0]; if (info.compCompHnd->getTypeForPrimitiveValueClass(targetElemHnd) == CORINFO_TYPE_UNDEF) { return nullptr; } const unsigned targetElemSize = info.compCompHnd->getClassSize(targetElemHnd); assert(targetElemSize != 0); const unsigned count = totalFieldSize / targetElemSize; if (count == 0) { return nullptr; } void* data = info.compCompHnd->getArrayInitializationData(fieldToken, totalFieldSize); if (!data) { return nullptr; } // // Ready to commit to the work // impPopStack(); // Turn count and pointer value into constants. GenTree* lengthValue = gtNewIconNode(count, TYP_INT); GenTree* pointerValue = gtNewIconHandleNode((size_t)data, GTF_ICON_CONST_PTR); // Construct ReadOnlySpan<T> to return. CORINFO_CLASS_HANDLE spanHnd = sig->retTypeClass; unsigned spanTempNum = lvaGrabTemp(true DEBUGARG("ReadOnlySpan<T> for CreateSpan<T>")); lvaSetStruct(spanTempNum, spanHnd, false); CORINFO_FIELD_HANDLE pointerFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 0); CORINFO_FIELD_HANDLE lengthFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 1); GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, 0); pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd)); GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue); GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, TARGET_POINTER_SIZE); lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd)); GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue); // Now append a few statements the initialize the span impAppendTree(lengthFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); impAppendTree(pointerFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // And finally create a tree that points at the span. 
    return impCreateLocalNode(spanTempNum DEBUGARG(0));
}

//------------------------------------------------------------------------
// impIntrinsic: possibly expand intrinsic call into alternate IR sequence
//
// Arguments:
//    newobjThis - for constructor calls, the tree for the newly allocated object
//    clsHnd - handle for the intrinsic method's class
//    method - handle for the intrinsic method
//    sig - signature of the intrinsic method
//    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
//    memberRef - the token for the intrinsic method
//    readonlyCall - true if call has a readonly prefix
//    tailCall - true if call is in tail position
//    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
//       if call is not constrained
//    constraintCallThisTransform -- this transform to apply for a constrained call
//    pIntrinsicName [OUT] -- intrinsic name (see enumeration in namedintrinsiclist.h)
//       for "traditional" jit intrinsics
//    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
//       that is amenable to special downstream optimization opportunities
//
// Returns:
//    IR tree to use in place of the call, or nullptr if the jit should treat
//    the intrinsic call like a normal call.
//
//    pIntrinsicName set to non-illegal value if the call is recognized as a
//    traditional jit intrinsic, even if the intrinsic is not expanded.
//
//    isSpecial set true if the expansion is subject to special
//    optimizations later in the jit processing
//
// Notes:
//    On success the IR tree may be a call to a different method or an inline
//    sequence. If it is a call, then the intrinsic processing here is responsible
//    for handling all the special cases, as upon return to impImportCall
//    expanded intrinsics bypass most of the normal call processing.
//
//    Intrinsics are generally not recognized in minopts and debug codegen.
//
//    However, certain traditional intrinsics are identified as "must expand"
//    if there is no fallback implementation to invoke; these must be handled
//    in all codegen modes.
//
//    New style intrinsics (where the fallback implementation is in IL) are
//    identified as "must expand" if they are invoked from within their
//    own method bodies.
//
GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
                                CORINFO_CLASS_HANDLE    clsHnd,
                                CORINFO_METHOD_HANDLE   method,
                                CORINFO_SIG_INFO*       sig,
                                unsigned                methodFlags,
                                int                     memberRef,
                                bool                    readonlyCall,
                                bool                    tailCall,
                                CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                                CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
                                NamedIntrinsic*         pIntrinsicName,
                                bool*                   isSpecialIntrinsic)
{
    assert((methodFlags & CORINFO_FLG_INTRINSIC) != 0);

    bool           mustExpand = false;
    bool           isSpecial  = false;
    NamedIntrinsic ni         = NI_Illegal;

    if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
    {
        // The recursive non-virtual calls to Jit intrinsics are must-expand by convention.
        mustExpand = mustExpand || (gtIsRecursiveCall(method) && !(methodFlags & CORINFO_FLG_VIRTUAL));

        ni = lookupNamedIntrinsic(method);

        // We specially support the following on all platforms to allow for dead
        // code optimization and to more generally support recursive intrinsics.
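        // e.g. an IsSupported query that folds to a constant here lets later phases remove the
        // branch guarding the unsupported path.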
if (ni == NI_IsSupported_True) { assert(sig->numArgs == 0); return gtNewIconNode(true); } if (ni == NI_IsSupported_False) { assert(sig->numArgs == 0); return gtNewIconNode(false); } if (ni == NI_Throw_PlatformNotSupportedException) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } #ifdef FEATURE_HW_INTRINSICS if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END)) { GenTree* hwintrinsic = impHWIntrinsic(ni, clsHnd, method, sig, mustExpand); if (mustExpand && (hwintrinsic == nullptr)) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand); } return hwintrinsic; } if ((ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END)) { // These intrinsics aren't defined recursively and so they will never be mustExpand // Instead, they provide software fallbacks that will be executed instead. assert(!mustExpand); return impSimdAsHWIntrinsic(ni, clsHnd, method, sig, newobjThis); } #endif // FEATURE_HW_INTRINSICS } *pIntrinsicName = ni; if (ni == NI_System_StubHelpers_GetStubContext) { // must be done regardless of DbgCode and MinOpts return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL); } if (ni == NI_System_StubHelpers_NextCallReturnAddress) { // For now we just avoid inlining anything into these methods since // this intrinsic is only rarely used. We could do this better if we // wanted to by trying to match which call is the one we need to get // the return address of. info.compHasNextCallRetAddr = true; return new (this, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL); } switch (ni) { // CreateSpan must be expanded for NativeAOT case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: mustExpand |= IsTargetAbi(CORINFO_CORERT_ABI); break; case NI_System_ByReference_ctor: case NI_System_ByReference_get_Value: case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: mustExpand = true; break; default: break; } GenTree* retNode = nullptr; // Under debug and minopts, only expand what is required. // NextCallReturnAddress intrinsic returns the return address of the next call. // If that call is an intrinsic and is expanded, codegen for NextCallReturnAddress will fail. // To avoid that we conservatively expand only required intrinsics in methods that call // the NextCallReturnAddress intrinsic. 
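    // Note: info.compHasNextCallRetAddr is set above when the NextCallReturnAddress intrinsic is seen.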
if (!mustExpand && (opts.OptimizationDisabled() || info.compHasNextCallRetAddr)) { *pIntrinsicName = NI_Illegal; return retNode; } CorInfoType callJitType = sig->retType; var_types callType = JITtype2varType(callJitType); /* First do the intrinsics which are always smaller than a call */ if (ni != NI_Illegal) { assert(retNode == nullptr); switch (ni) { case NI_Array_Address: case NI_Array_Get: case NI_Array_Set: retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, ni); break; case NI_System_String_Equals: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_MemoryExtensions_Equals: case NI_System_MemoryExtensions_SequenceEqual: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_String_StartsWith: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_StartsWith: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_AsSpan: case NI_System_String_op_Implicit: { assert(sig->numArgs == 1); isSpecial = impStackTop().val->OperIs(GT_CNS_STR); break; } case NI_System_String_get_Chars: { GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; retNode = gtNewIndexRef(TYP_USHORT, op1, op2); retNode->gtFlags |= GTF_INX_STRING_LAYOUT; break; } case NI_System_String_get_Length: { GenTree* op1 = impPopStack().val; if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. "Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { retNode = iconNode; break; } } GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen, compCurBB); op1 = arrLen; // Getting the length of a null string should throw op1->gtFlags |= GTF_EXCEPT; retNode = op1; break; } // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field // in a value type. The canonical example of this is Span<T>. In effect this is just a // substitution. The parameter byref will be assigned into the newly allocated object. case NI_System_ByReference_ctor: { // Remove call to constructor and directly assign the byref passed // to the call to the first slot of the ByReference struct. GenTree* op1 = impPopStack().val; GenTree* thisptr = newobjThis; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0); GenTree* assign = gtNewAssignNode(field, op1); GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1()); assert(byReferenceStruct != nullptr); impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd)); retNode = assign; break; } // Implement ptr value getter for ByReference struct. case NI_System_ByReference_get_Value: { GenTree* op1 = impPopStack().val; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0); retNode = field; break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: { retNode = impCreateSpanIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: { retNode = impInitializeArrayIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant: { GenTree* op1 = impPopStack().val; if (op1->OperIsConst()) { // op1 is a known constant, replace with 'true'. 
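                    // e.g. RuntimeHelpers.IsKnownConstant(42) or IsKnownConstant("abc") folds to true at this point.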
retNode = gtNewIconNode(1); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to true early\n"); // We can also consider FTN_ADDR and typeof(T) here } else { // op1 is not a known constant, we'll do the expansion in morph retNode = new (this, GT_INTRINSIC) GenTreeIntrinsic(TYP_INT, op1, ni, method); JITDUMP("\nConverting RuntimeHelpers.IsKnownConstant to:\n"); DISPTREE(retNode); } break; } case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: { assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it. CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = memberRef; resolvedToken.tokenType = CORINFO_TOKENKIND_Method; CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo); GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef), embedInfo.compileTimeHandle); if (rawHandle == nullptr) { return nullptr; } noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL)); unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle")); impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE); GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL); GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar); var_types resultType = JITtype2varType(sig->retType); retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr); break; } case NI_System_Span_get_Item: case NI_System_ReadOnlySpan_get_Item: { // Have index, stack pointer-to Span<T> s on the stack. Expand to: // // For Span<T> // Comma // BoundsCheck(index, s->_length) // s->_pointer + index * sizeof(T) // // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref // // Signature should show one class type parameter, which // we need to examine. assert(sig->sigInst.classInstCount == 1); assert(sig->numArgs == 1); CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0]; const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd); assert(elemSize > 0); const bool isReadOnly = (ni == NI_System_ReadOnlySpan_get_Item); JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "", info.compCompHnd->getClassName(spanElemHnd), elemSize); GenTree* index = impPopStack().val; GenTree* ptrToSpan = impPopStack().val; GenTree* indexClone = nullptr; GenTree* ptrToSpanClone = nullptr; assert(genActualType(index) == TYP_INT); assert(ptrToSpan->TypeGet() == TYP_BYREF); #if defined(DEBUG) if (verbose) { printf("with ptr-to-span\n"); gtDispTree(ptrToSpan); printf("and index\n"); gtDispTree(index); } #endif // defined(DEBUG) // We need to use both index and ptr-to-span twice, so clone or spill. 
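                // (Once for the bounds check and once to compute the element address.)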
index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item index")); ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item ptrToSpan")); // Bounds check CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1); const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd); GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset); GenTree* boundsCheck = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, length, SCK_RNGCHK_FAIL); // Element access index = indexClone; #ifdef TARGET_64BIT if (index->OperGet() == GT_CNS_INT) { index->gtType = TYP_I_IMPL; } else { index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL); } #endif if (elemSize != 1) { GenTree* sizeofNode = gtNewIconNode(static_cast<ssize_t>(elemSize), TYP_I_IMPL); index = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, sizeofNode); } CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd); GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset); GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, index); // Prepare result var_types resultType = JITtype2varType(sig->retType); assert(resultType == result->TypeGet()); retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result); break; } case NI_System_RuntimeTypeHandle_GetValueInternal: { GenTree* op1 = impStackTop(0).val; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall())) { // Old tree // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle // // New tree // TreeToGetNativeTypeHandle // Remove call to helper and return the native TypeHandle pointer that was the parameter // to that helper. op1 = impPopStack().val; // Get native TypeHandle argument to old helper GenTreeCall::Use* arg = op1->AsCall()->gtCallArgs; assert(arg->GetNext() == nullptr); op1 = arg->GetNode(); retNode = op1; } // Call the regular function. break; } case NI_System_Type_GetTypeFromHandle: { GenTree* op1 = impStackTop(0).val; CorInfoHelpFunc typeHandleHelper; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper)) { op1 = impPopStack().val; // Replace helper with a more specialized helper that returns RuntimeType if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE) { typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE; } else { assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL); typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL; } assert(op1->AsCall()->gtCallArgs->GetNext() == nullptr); op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->AsCall()->gtCallArgs); op1->gtType = TYP_REF; retNode = op1; } break; } case NI_System_Type_op_Equality: case NI_System_Type_op_Inequality: { JITDUMP("Importing Type.op_*Equality intrinsic\n"); GenTree* op1 = impStackTop(1).val; GenTree* op2 = impStackTop(0).val; GenTree* optTree = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2); if (optTree != nullptr) { // Success, clean up the evaluation stack. impPopStack(); impPopStack(); // See if we can optimize even further, to a handle compare. optTree = gtFoldTypeCompare(optTree); // See if we can now fold a handle compare to a constant. 
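                    // e.g. typeof(int) == typeof(string) can now become a constant.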
optTree = gtFoldExpr(optTree); retNode = optTree; } else { // Retry optimizing these later isSpecial = true; } break; } case NI_System_Enum_HasFlag: { GenTree* thisOp = impStackTop(1).val; GenTree* flagOp = impStackTop(0).val; GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp); if (optTree != nullptr) { // Optimization successful. Pop the stack for real. impPopStack(); impPopStack(); retNode = optTree; } else { // Retry optimizing this during morph. isSpecial = true; } break; } case NI_System_Type_IsAssignableFrom: { GenTree* typeTo = impStackTop(1).val; GenTree* typeFrom = impStackTop(0).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_IsAssignableTo: { GenTree* typeTo = impStackTop(0).val; GenTree* typeFrom = impStackTop(1).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_get_IsValueType: { // Optimize // // call Type.GetTypeFromHandle (which is replaced with CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) // call Type.IsValueType // // to `true` or `false` // e.g. `typeof(int).IsValueType` => `true` if (impStackTop().val->IsCall()) { GenTreeCall* call = impStackTop().val->AsCall(); if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE)) { CORINFO_CLASS_HANDLE hClass = gtGetHelperArgClassHandle(call->gtCallArgs->GetNode()); if (hClass != NO_CLASS_HANDLE) { retNode = gtNewIconNode((eeIsValueClass(hClass) && // pointers are not value types (e.g. typeof(int*).IsValueType is false) info.compCompHnd->asCorInfoType(hClass) != CORINFO_TYPE_PTR) ? 1 : 0); impPopStack(); // drop CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE call } } } break; } case NI_System_Threading_Thread_get_ManagedThreadId: { if (impStackTop().val->OperIs(GT_RET_EXPR)) { GenTreeCall* call = impStackTop().val->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { if (lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Threading_Thread_get_CurrentThread) { // drop get_CurrentThread() call impPopStack(); call->ReplaceWith(gtNewNothingNode(), this); retNode = gtNewHelperCallNode(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, TYP_INT); } } } break; } #ifdef TARGET_ARM64 // Intrinsify Interlocked.Or and Interlocked.And only for arm64-v8.1 (and newer) // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). case NI_System_Threading_Interlocked_Or: case NI_System_Threading_Interlocked_And: { if (compOpportunisticallyDependsOn(InstructionSet_Atomics)) { assert(sig->numArgs == 2); GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; genTreeOps op = (ni == NI_System_Threading_Interlocked_Or) ? 
GT_XORR : GT_XAND; retNode = gtNewOperNode(op, genActualType(callType), op1, op2); retNode->gtFlags |= GTF_GLOB_REF | GTF_ASG; } break; } #endif // TARGET_ARM64 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic case NI_System_Threading_Interlocked_CompareExchange: { var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } assert(callType != TYP_STRUCT); assert(sig->numArgs == 3); GenTree* op3 = impPopStack().val; // comparand GenTree* op2 = impPopStack().val; // value GenTree* op1 = impPopStack().val; // location GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3); node->AsCmpXchg()->gtOpLocation->gtFlags |= GTF_DONT_CSE; retNode = node; break; } case NI_System_Threading_Interlocked_Exchange: case NI_System_Threading_Interlocked_ExchangeAdd: { assert(callType != TYP_STRUCT); assert(sig->numArgs == 2); var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; // This creates: // val // XAdd // addr // field (for example) // // In the case where the first argument is the address of a local, we might // want to make this *not* make the var address-taken -- but atomic instructions // on a local are probably pretty useless anyway, so we probably don't care. op1 = gtNewOperNode(ni == NI_System_Threading_Interlocked_ExchangeAdd ? GT_XADD : GT_XCHG, genActualType(callType), op1, op2); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; retNode = op1; break; } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) case NI_System_Threading_Interlocked_MemoryBarrier: case NI_System_Threading_Interlocked_ReadMemoryBarrier: { assert(sig->numArgs == 0); GenTree* op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; // On XARCH `NI_System_Threading_Interlocked_ReadMemoryBarrier` fences need not be emitted. // However, we still need to capture the effect on reordering. 
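            // Tagging the barrier with GTF_MEMORYBARRIER_LOAD (below) marks it as a load-only
            // barrier; on XARCH that typically lets codegen skip the fence instruction while
            // still blocking compiler reordering across this point.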
if (ni == NI_System_Threading_Interlocked_ReadMemoryBarrier) { op1->gtFlags |= GTF_MEMORYBARRIER_LOAD; } retNode = op1; break; } #ifdef FEATURE_HW_INTRINSICS case NI_System_Math_FusedMultiplyAdd: { #ifdef TARGET_XARCH if (compExactlyDependsOn(InstructionSet_FMA) && supportSIMDTypes()) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return FMA.MultiplyAddScalar( // Vector128.CreateScalarUnsafe(x), // Vector128.CreateScalarUnsafe(y), // Vector128.CreateScalarUnsafe(z) // ).ToScalar(); GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* res = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callJitType, 16); retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callJitType, 16); break; } #elif defined(TARGET_ARM64) if (compExactlyDependsOn(InstructionSet_AdvSimd)) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return AdvSimd.FusedMultiplyAddScalar( // Vector64.Create{ScalarUnsafe}(z), // Vector64.Create{ScalarUnsafe}(y), // Vector64.Create{ScalarUnsafe}(x) // ).ToScalar(); NamedIntrinsic createVector64 = (callType == TYP_DOUBLE) ? NI_Vector64_Create : NI_Vector64_CreateScalarUnsafe; constexpr unsigned int simdSize = 8; GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); // Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3 // while Math{F}.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 * op2 + op3 retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar, callJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callJitType, simdSize); break; } #endif // TODO-CQ-XArch: Ideally we would create a GT_INTRINSIC node for fma, however, that currently // requires more extensive changes to valuenum to support methods with 3 operands // We want to generate a GT_INTRINSIC node in the case the call can't be treated as // a target intrinsic so that we can still benefit from CSE and constant folding. 
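            // (For now nothing is produced here; leaving retNode null means the call is
            //  imported as an ordinary call.)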
break; } #endif // FEATURE_HW_INTRINSICS case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: #ifdef TARGET_ARM64 // ARM64 has fmax/fmin which are IEEE754:2019 minimum/maximum compatible // TODO-XARCH-CQ: Enable this for XARCH when one of the arguments is a constant // so we can then emit maxss/minss and avoid NaN/-0.0 handling case NI_System_Math_Max: case NI_System_Math_Min: #endif case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: case NI_System_Math_Truncate: { retNode = impMathIntrinsic(method, sig, callType, ni, tailCall); break; } case NI_System_Array_Clone: case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: case NI_System_Object_MemberwiseClone: case NI_System_Threading_Thread_get_CurrentThread: { // Flag for later handling. isSpecial = true; break; } case NI_System_Object_GetType: { JITDUMP("\n impIntrinsic: call to Object.GetType\n"); GenTree* op1 = impStackTop(0).val; // If we're calling GetType on a boxed value, just get the type directly. if (op1->IsBoxedValue()) { JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n"); // Try and clean up the box. Obtain the handle we // were going to pass to the newobj. GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE); if (boxTypeHandle != nullptr) { // Note we don't need to play the TYP_STRUCT games here like // do for LDTOKEN since the return value of this operator is Type, // not RuntimeTypeHandle. impPopStack(); GenTreeCall::Use* helperArgs = gtNewCallArgs(boxTypeHandle); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } // If we have a constrained callvirt with a "box this" transform // we know we have a value class and hence an exact type. // // If so, instead of boxing and then extracting the type, just // construct the type directly. if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) && (constraintCallThisTransform == CORINFO_BOX_THIS)) { // Ensure this is one of the is simple box cases (in particular, rule out nullables). 
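                // For example (C# sketch), in
                //
                //     static Type M<T>(T t) where T : struct => t.GetType();
                //
                // the constrained call can yield the RuntimeType for T directly, with no box
                // ever being created, provided the ordinary box helper would have been used
                // (which rules out Nullable<T>).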
const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass); const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX); if (isSafeToOptimize) { JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n"); impPopStack(); GenTree* typeHandleOp = impTokenToHandle(pConstrainedResolvedToken, nullptr, true /* mustRestoreHandle */); if (typeHandleOp == nullptr) { assert(compDonotInline()); return nullptr; } GenTreeCall::Use* helperArgs = gtNewCallArgs(typeHandleOp); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } #ifdef DEBUG if (retNode != nullptr) { JITDUMP("Optimized result for call to GetType is\n"); if (verbose) { gtDispTree(retNode); } } #endif // Else expand as an intrinsic, unless the call is constrained, // in which case we defer expansion to allow impImportCall do the // special constraint processing. if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr)) { JITDUMP("Expanding as special intrinsic\n"); impPopStack(); op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, ni, method); // Set the CALL flag to indicate that the operator is implemented by a call. // Set also the EXCEPTION flag because the native implementation of // NI_System_Object_GetType intrinsic can throw NullReferenceException. op1->gtFlags |= (GTF_CALL | GTF_EXCEPT); retNode = op1; // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } if (retNode == nullptr) { JITDUMP("Leaving as normal call\n"); // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } break; } case NI_System_Array_GetLength: case NI_System_Array_GetLowerBound: case NI_System_Array_GetUpperBound: { // System.Array.GetLength(Int32) method: // public int GetLength(int dimension) // System.Array.GetLowerBound(Int32) method: // public int GetLowerBound(int dimension) // System.Array.GetUpperBound(Int32) method: // public int GetUpperBound(int dimension) // // Only implement these as intrinsics for multi-dimensional arrays. // Only handle constant dimension arguments. GenTree* gtDim = impStackTop().val; GenTree* gtArr = impStackTop(1).val; if (gtDim->IsIntegralConst()) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE arrCls = gtGetClassHandle(gtArr, &isExact, &isNonNull); if (arrCls != NO_CLASS_HANDLE) { unsigned rank = info.compCompHnd->getArrayRank(arrCls); if ((rank > 1) && !info.compCompHnd->isSDArray(arrCls)) { // `rank` is guaranteed to be <=32 (see MAX_RANK in vm\array.h). Any constant argument // is `int` sized. INT64 dimValue = gtDim->AsIntConCommon()->IntegralValue(); assert((unsigned int)dimValue == dimValue); unsigned dim = (unsigned int)dimValue; if (dim < rank) { // This is now known to be a multi-dimension array with a constant dimension // that is in range; we can expand it as an intrinsic. impPopStack().val; // Pop the dim and array object; we already have a pointer to them. impPopStack().val; // Make sure there are no global effects in the array (such as it being a function // call), so we can mark the generated indirection with GTF_IND_INVARIANT. In the // GetUpperBound case we need the cloned object, since we refer to the array // object twice. In the other cases, we don't need to clone. 
GenTree* gtArrClone = nullptr; if (((gtArr->gtFlags & GTF_GLOB_EFFECT) != 0) || (ni == NI_System_Array_GetUpperBound)) { gtArr = impCloneExpr(gtArr, &gtArrClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("MD intrinsics array")); } switch (ni) { case NI_System_Array_GetLength: { // Generate *(array + offset-to-length-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLengthOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetLowerBound: { // Generate *(array + offset-to-bounds-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetUpperBound: { assert(gtArrClone != nullptr); // Generate: // *(array + offset-to-length-array + sizeof(int) * dim) + // *(array + offset-to-bounds-array + sizeof(int) * dim) - 1 unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); GenTree* gtLowerBound = gtNewIndir(TYP_INT, gtAddr); gtLowerBound->gtFlags |= GTF_IND_INVARIANT; offs = eeGetMDArrayLengthOffset(rank, dim); gtOffs = gtNewIconNode(offs, TYP_I_IMPL); gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArrClone, gtOffs); GenTree* gtLength = gtNewIndir(TYP_INT, gtAddr); gtLength->gtFlags |= GTF_IND_INVARIANT; GenTree* gtSum = gtNewOperNode(GT_ADD, TYP_INT, gtLowerBound, gtLength); GenTree* gtOne = gtNewIconNode(1, TYP_INT); retNode = gtNewOperNode(GT_SUB, TYP_INT, gtSum, gtOne); break; } default: unreached(); } } } } } break; } case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness: { assert(sig->numArgs == 1); // We expect the return type of the ReverseEndianness routine to match the type of the // one and only argument to the method. We use a special instruction for 16-bit // BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally, // we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a // 64-bit byte swap on a 32-bit arch, we'll fall to the default case in the switch block below. switch (sig->retType) { case CorInfoType::CORINFO_TYPE_SHORT: case CorInfoType::CORINFO_TYPE_USHORT: retNode = gtNewCastNode(TYP_INT, gtNewOperNode(GT_BSWAP16, TYP_INT, impPopStack().val), false, callType); break; case CorInfoType::CORINFO_TYPE_INT: case CorInfoType::CORINFO_TYPE_UINT: #ifdef TARGET_64BIT case CorInfoType::CORINFO_TYPE_LONG: case CorInfoType::CORINFO_TYPE_ULONG: #endif // TARGET_64BIT retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val); break; default: // This default case gets hit on 32-bit archs when a call to a 64-bit overload // of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard // method call, where the implementation decomposes the operation into two 32-bit // bswap routines. If the input to the 64-bit function is a constant, then we rely // on inlining + constant folding of 32-bit bswaps to effectively constant fold // the 64-bit call site. 
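                    // (retNode stays null here, so the call is imported normally and the
                    //  managed ReverseEndianness implementation handles the 64-bit swap.)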
break; } break; } // Fold PopCount for constant input case NI_System_Numerics_BitOperations_PopCount: { assert(sig->numArgs == 1); if (impStackTop().val->IsIntegralConst()) { typeInfo argType = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack(); INT64 cns = impPopStack().val->AsIntConCommon()->IntegralValue(); if (argType.IsType(TI_LONG)) { retNode = gtNewIconNode(genCountBits(cns), callType); } else { assert(argType.IsType(TI_INT)); retNode = gtNewIconNode(genCountBits(static_cast<unsigned>(cns)), callType); } } break; } case NI_System_GC_KeepAlive: { retNode = impKeepAliveIntrinsic(impPopStack().val); break; } default: break; } } if (mustExpand && (retNode == nullptr)) { assert(!"Unhandled must expand intrinsic, throwing PlatformNotSupportedException"); return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } // Optionally report if this intrinsic is special // (that is, potentially re-optimizable during morph). if (isSpecialIntrinsic != nullptr) { *isSpecialIntrinsic = isSpecial; } return retNode; } GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) { // Optimize patterns like: // // typeof(TTo).IsAssignableFrom(typeof(TTFrom)) // valueTypeVar.GetType().IsAssignableFrom(typeof(TTFrom)) // typeof(TTFrom).IsAssignableTo(typeof(TTo)) // typeof(TTFrom).IsAssignableTo(valueTypeVar.GetType()) // // to true/false if (typeTo->IsCall() && typeFrom->IsCall()) { // make sure both arguments are `typeof()` CORINFO_METHOD_HANDLE hTypeof = eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE); if ((typeTo->AsCall()->gtCallMethHnd == hTypeof) && (typeFrom->AsCall()->gtCallMethHnd == hTypeof)) { CORINFO_CLASS_HANDLE hClassTo = gtGetHelperArgClassHandle(typeTo->AsCall()->gtCallArgs->GetNode()); CORINFO_CLASS_HANDLE hClassFrom = gtGetHelperArgClassHandle(typeFrom->AsCall()->gtCallArgs->GetNode()); if (hClassTo == NO_CLASS_HANDLE || hClassFrom == NO_CLASS_HANDLE) { return nullptr; } TypeCompareState castResult = info.compCompHnd->compareTypesForCast(hClassFrom, hClassTo); if (castResult == TypeCompareState::May) { // requires runtime check // e.g. __Canon, COMObjects, Nullable return nullptr; } GenTreeIntCon* retNode = gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0); impPopStack(); // drop both CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE calls impPopStack(); return retNode; } } return nullptr; } GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall) { GenTree* op1; GenTree* op2; assert(callType != TYP_STRUCT); assert(IsMathIntrinsic(intrinsicName)); op1 = nullptr; #if !defined(TARGET_X86) // Intrinsics that are not implemented directly by target instructions will // be re-materialized as users calls in rationalizer. For prefixed tail calls, // don't do this optimization, because // a) For back compatibility reasons on desktop .NET Framework 4.6 / 4.6.1 // b) It will be non-trivial task or too late to re-materialize a surviving // tail prefixed GT_INTRINSIC as tail call in rationalizer. if (!IsIntrinsicImplementedByUserCall(intrinsicName) || !tailCall) #else // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad // code generation for certain EH constructs. 
if (!IsIntrinsicImplementedByUserCall(intrinsicName)) #endif { CORINFO_CLASS_HANDLE tmpClass; CORINFO_ARG_LIST_HANDLE arg; var_types op1Type; var_types op2Type; switch (sig->numArgs) { case 1: op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicName, method); break; case 2: op2 = impPopStack().val; op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } arg = info.compCompHnd->getArgNext(arg); op2Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op2->TypeGet() != genActualType(op2Type)) { assert(varTypeIsFloating(op2)); op2 = gtNewCastNode(callType, op2, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicName, method); break; default: NO_WAY("Unsupported number of args for Math Intrinsic"); } if (IsIntrinsicImplementedByUserCall(intrinsicName)) { op1->gtFlags |= GTF_CALL; } } return op1; } //------------------------------------------------------------------------ // lookupNamedIntrinsic: map method to jit named intrinsic value // // Arguments: // method -- method handle for method // // Return Value: // Id for the named intrinsic, or Illegal if none. // // Notes: // method should have CORINFO_FLG_INTRINSIC set in its attributes, // otherwise it is not a named jit intrinsic. 
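//    For example, "System.Math.Sqrt" maps to NI_System_Math_Sqrt and
//    "System.Threading.Interlocked.CompareExchange" maps to
//    NI_System_Threading_Interlocked_CompareExchange via the string matching below.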
// NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) { const char* className = nullptr; const char* namespaceName = nullptr; const char* enclosingClassName = nullptr; const char* methodName = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName, &enclosingClassName); JITDUMP("Named Intrinsic "); if (namespaceName != nullptr) { JITDUMP("%s.", namespaceName); } if (enclosingClassName != nullptr) { JITDUMP("%s.", enclosingClassName); } if (className != nullptr) { JITDUMP("%s.", className); } if (methodName != nullptr) { JITDUMP("%s", methodName); } if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr)) { // Check if we are dealing with an MD array's known runtime method CorInfoArrayIntrinsic arrayFuncIndex = info.compCompHnd->getArrayIntrinsicID(method); switch (arrayFuncIndex) { case CorInfoArrayIntrinsic::GET: JITDUMP("ARRAY_FUNC_GET: Recognized\n"); return NI_Array_Get; case CorInfoArrayIntrinsic::SET: JITDUMP("ARRAY_FUNC_SET: Recognized\n"); return NI_Array_Set; case CorInfoArrayIntrinsic::ADDRESS: JITDUMP("ARRAY_FUNC_ADDRESS: Recognized\n"); return NI_Array_Address; default: break; } JITDUMP(": Not recognized, not enough metadata\n"); return NI_Illegal; } JITDUMP(": "); NamedIntrinsic result = NI_Illegal; if (strcmp(namespaceName, "System") == 0) { if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0)) { result = NI_System_Enum_HasFlag; } else if (strcmp(className, "Activator") == 0) { if (strcmp(methodName, "AllocatorOf") == 0) { result = NI_System_Activator_AllocatorOf; } else if (strcmp(methodName, "DefaultConstructorOf") == 0) { result = NI_System_Activator_DefaultConstructorOf; } } else if (strcmp(className, "ByReference`1") == 0) { if (strcmp(methodName, ".ctor") == 0) { result = NI_System_ByReference_ctor; } else if (strcmp(methodName, "get_Value") == 0) { result = NI_System_ByReference_get_Value; } } else if (strcmp(className, "Math") == 0 || strcmp(className, "MathF") == 0) { if (strcmp(methodName, "Abs") == 0) { result = NI_System_Math_Abs; } else if (strcmp(methodName, "Acos") == 0) { result = NI_System_Math_Acos; } else if (strcmp(methodName, "Acosh") == 0) { result = NI_System_Math_Acosh; } else if (strcmp(methodName, "Asin") == 0) { result = NI_System_Math_Asin; } else if (strcmp(methodName, "Asinh") == 0) { result = NI_System_Math_Asinh; } else if (strcmp(methodName, "Atan") == 0) { result = NI_System_Math_Atan; } else if (strcmp(methodName, "Atanh") == 0) { result = NI_System_Math_Atanh; } else if (strcmp(methodName, "Atan2") == 0) { result = NI_System_Math_Atan2; } else if (strcmp(methodName, "Cbrt") == 0) { result = NI_System_Math_Cbrt; } else if (strcmp(methodName, "Ceiling") == 0) { result = NI_System_Math_Ceiling; } else if (strcmp(methodName, "Cos") == 0) { result = NI_System_Math_Cos; } else if (strcmp(methodName, "Cosh") == 0) { result = NI_System_Math_Cosh; } else if (strcmp(methodName, "Exp") == 0) { result = NI_System_Math_Exp; } else if (strcmp(methodName, "Floor") == 0) { result = NI_System_Math_Floor; } else if (strcmp(methodName, "FMod") == 0) { result = NI_System_Math_FMod; } else if (strcmp(methodName, "FusedMultiplyAdd") == 0) { result = NI_System_Math_FusedMultiplyAdd; } else if (strcmp(methodName, "ILogB") == 0) { result = NI_System_Math_ILogB; } else if (strcmp(methodName, "Log") == 0) { result = NI_System_Math_Log; } else if (strcmp(methodName, "Log2") == 0) { result = NI_System_Math_Log2; } else if (strcmp(methodName, "Log10") == 0) { result = 
NI_System_Math_Log10; } else if (strcmp(methodName, "Max") == 0) { result = NI_System_Math_Max; } else if (strcmp(methodName, "Min") == 0) { result = NI_System_Math_Min; } else if (strcmp(methodName, "Pow") == 0) { result = NI_System_Math_Pow; } else if (strcmp(methodName, "Round") == 0) { result = NI_System_Math_Round; } else if (strcmp(methodName, "Sin") == 0) { result = NI_System_Math_Sin; } else if (strcmp(methodName, "Sinh") == 0) { result = NI_System_Math_Sinh; } else if (strcmp(methodName, "Sqrt") == 0) { result = NI_System_Math_Sqrt; } else if (strcmp(methodName, "Tan") == 0) { result = NI_System_Math_Tan; } else if (strcmp(methodName, "Tanh") == 0) { result = NI_System_Math_Tanh; } else if (strcmp(methodName, "Truncate") == 0) { result = NI_System_Math_Truncate; } } else if (strcmp(className, "GC") == 0) { if (strcmp(methodName, "KeepAlive") == 0) { result = NI_System_GC_KeepAlive; } } else if (strcmp(className, "Array") == 0) { if (strcmp(methodName, "Clone") == 0) { result = NI_System_Array_Clone; } else if (strcmp(methodName, "GetLength") == 0) { result = NI_System_Array_GetLength; } else if (strcmp(methodName, "GetLowerBound") == 0) { result = NI_System_Array_GetLowerBound; } else if (strcmp(methodName, "GetUpperBound") == 0) { result = NI_System_Array_GetUpperBound; } } else if (strcmp(className, "Object") == 0) { if (strcmp(methodName, "MemberwiseClone") == 0) { result = NI_System_Object_MemberwiseClone; } else if (strcmp(methodName, "GetType") == 0) { result = NI_System_Object_GetType; } else if (strcmp(methodName, "MethodTableOf") == 0) { result = NI_System_Object_MethodTableOf; } } else if (strcmp(className, "RuntimeTypeHandle") == 0) { if (strcmp(methodName, "GetValueInternal") == 0) { result = NI_System_RuntimeTypeHandle_GetValueInternal; } } else if (strcmp(className, "Type") == 0) { if (strcmp(methodName, "get_IsValueType") == 0) { result = NI_System_Type_get_IsValueType; } else if (strcmp(methodName, "IsAssignableFrom") == 0) { result = NI_System_Type_IsAssignableFrom; } else if (strcmp(methodName, "IsAssignableTo") == 0) { result = NI_System_Type_IsAssignableTo; } else if (strcmp(methodName, "op_Equality") == 0) { result = NI_System_Type_op_Equality; } else if (strcmp(methodName, "op_Inequality") == 0) { result = NI_System_Type_op_Inequality; } else if (strcmp(methodName, "GetTypeFromHandle") == 0) { result = NI_System_Type_GetTypeFromHandle; } } else if (strcmp(className, "String") == 0) { if (strcmp(methodName, "Equals") == 0) { result = NI_System_String_Equals; } else if (strcmp(methodName, "get_Chars") == 0) { result = NI_System_String_get_Chars; } else if (strcmp(methodName, "get_Length") == 0) { result = NI_System_String_get_Length; } else if (strcmp(methodName, "op_Implicit") == 0) { result = NI_System_String_op_Implicit; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_String_StartsWith; } } else if (strcmp(className, "MemoryExtensions") == 0) { if (strcmp(methodName, "AsSpan") == 0) { result = NI_System_MemoryExtensions_AsSpan; } if (strcmp(methodName, "SequenceEqual") == 0) { result = NI_System_MemoryExtensions_SequenceEqual; } else if (strcmp(methodName, "Equals") == 0) { result = NI_System_MemoryExtensions_Equals; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_MemoryExtensions_StartsWith; } } else if (strcmp(className, "Span`1") == 0) { if (strcmp(methodName, "get_Item") == 0) { result = NI_System_Span_get_Item; } } else if (strcmp(className, "ReadOnlySpan`1") == 0) { if (strcmp(methodName, "get_Item") == 0) 
{ result = NI_System_ReadOnlySpan_get_Item; } } else if (strcmp(className, "EETypePtr") == 0) { if (strcmp(methodName, "EETypePtrOf") == 0) { result = NI_System_EETypePtr_EETypePtrOf; } } } else if (strcmp(namespaceName, "System.Threading") == 0) { if (strcmp(className, "Thread") == 0) { if (strcmp(methodName, "get_CurrentThread") == 0) { result = NI_System_Threading_Thread_get_CurrentThread; } else if (strcmp(methodName, "get_ManagedThreadId") == 0) { result = NI_System_Threading_Thread_get_ManagedThreadId; } } else if (strcmp(className, "Interlocked") == 0) { #ifndef TARGET_ARM64 // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). if (strcmp(methodName, "And") == 0) { result = NI_System_Threading_Interlocked_And; } else if (strcmp(methodName, "Or") == 0) { result = NI_System_Threading_Interlocked_Or; } #endif if (strcmp(methodName, "CompareExchange") == 0) { result = NI_System_Threading_Interlocked_CompareExchange; } else if (strcmp(methodName, "Exchange") == 0) { result = NI_System_Threading_Interlocked_Exchange; } else if (strcmp(methodName, "ExchangeAdd") == 0) { result = NI_System_Threading_Interlocked_ExchangeAdd; } else if (strcmp(methodName, "MemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_MemoryBarrier; } else if (strcmp(methodName, "ReadMemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_ReadMemoryBarrier; } } } #if defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Buffers.Binary") == 0) { if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0)) { result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness; } } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Collections.Generic") == 0) { if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_EqualityComparer_get_Default; } else if ((strcmp(className, "Comparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_Comparer_get_Default; } } else if ((strcmp(namespaceName, "System.Numerics") == 0) && (strcmp(className, "BitOperations") == 0)) { if (strcmp(methodName, "PopCount") == 0) { result = NI_System_Numerics_BitOperations_PopCount; } } #ifdef FEATURE_HW_INTRINSICS else if (strcmp(namespaceName, "System.Numerics") == 0) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); int sizeOfVectorT = getSIMDVectorRegisterByteLength(); result = SimdAsHWIntrinsicInfo::lookupId(&sig, className, methodName, enclosingClassName, sizeOfVectorT); } #endif // FEATURE_HW_INTRINSICS else if ((strcmp(namespaceName, "System.Runtime.CompilerServices") == 0) && (strcmp(className, "RuntimeHelpers") == 0)) { if (strcmp(methodName, "CreateSpan") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan; } else if (strcmp(methodName, "InitializeArray") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray; } else if (strcmp(methodName, "IsKnownConstant") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant; } } else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0) { // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled // so we can specially handle IsSupported and recursive calls. // This is required to appropriately handle the intrinsics on platforms // which don't support them. 
On such a platform methods like Vector64.Create // will be seen as `Intrinsic` and `mustExpand` due to having a code path // which is recursive. When such a path is hit we expect it to be handled by // the importer and we fire an assert if it wasn't and in previous versions // of the JIT would fail fast. This was changed to throw a PNSE instead but // we still assert as most intrinsics should have been recognized/handled. // In order to avoid the assert, we specially handle the IsSupported checks // (to better allow dead-code optimizations) and we explicitly throw a PNSE // as we know that is the desired behavior for the HWIntrinsics when not // supported. For cases like Vector64.Create, this is fine because it will // be behind a relevant IsSupported check and will never be hit and the // software fallback will be executed instead. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef FEATURE_HW_INTRINSICS namespaceName += 25; const char* platformNamespaceName; #if defined(TARGET_XARCH) platformNamespaceName = ".X86"; #elif defined(TARGET_ARM64) platformNamespaceName = ".Arm"; #else #error Unsupported platform #endif if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0)) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); } #endif // FEATURE_HW_INTRINSICS if (result == NI_Illegal) { if ((strcmp(methodName, "get_IsSupported") == 0) || (strcmp(methodName, "get_IsHardwareAccelerated") == 0)) { // This allows the relevant code paths to be dropped as dead code even // on platforms where FEATURE_HW_INTRINSICS is not supported. result = NI_IsSupported_False; } else if (gtIsRecursiveCall(method)) { // For the framework itself, any recursive intrinsics will either be // only supported on a single platform or will be guarded by a relevant // IsSupported check so the throw PNSE will be valid or dropped. result = NI_Throw_PlatformNotSupportedException; } } } else if (strcmp(namespaceName, "System.StubHelpers") == 0) { if (strcmp(className, "StubHelpers") == 0) { if (strcmp(methodName, "GetStubContext") == 0) { result = NI_System_StubHelpers_GetStubContext; } else if (strcmp(methodName, "NextCallReturnAddress") == 0) { result = NI_System_StubHelpers_NextCallReturnAddress; } } } if (result == NI_Illegal) { JITDUMP("Not recognized\n"); } else if (result == NI_IsSupported_False) { JITDUMP("Unsupported - return false"); } else if (result == NI_Throw_PlatformNotSupportedException) { JITDUMP("Unsupported - throw PlatformNotSupportedException"); } else { JITDUMP("Recognized\n"); } return result; } //------------------------------------------------------------------------ // impUnsupportedNamedIntrinsic: Throws an exception for an unsupported named intrinsic // // Arguments: // helper - JIT helper ID for the exception to be thrown // method - method handle of the intrinsic function. // sig - signature of the intrinsic call // mustExpand - true if the intrinsic must return a GenTree*; otherwise, false // // Return Value: // a gtNewMustThrowException if mustExpand is true; otherwise, nullptr // GenTree* Compiler::impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand) { // We've hit some error case and may need to return a node for the given error. // // When `mustExpand=false`, we are attempting to inline the intrinsic directly into another method. 
In this // scenario, we need to return `nullptr` so that a GT_CALL to the intrinsic is emitted instead. This is to // ensure that everything continues to behave correctly when optimizations are enabled (e.g. things like the // inliner may expect the node we return to have a certain signature, and the `MustThrowException` node won't // match that). // // When `mustExpand=true`, we are in a GT_CALL to the intrinsic and are attempting to JIT it. This will generally // be in response to an indirect call (e.g. done via reflection) or in response to an earlier attempt returning // `nullptr` (under `mustExpand=false`). In that scenario, we are safe to return the `MustThrowException` node. if (mustExpand) { for (unsigned i = 0; i < sig->numArgs; i++) { impPopStack(); } return gtNewMustThrowException(helper, JITtype2varType(sig->retType), sig->retTypeClass); } else { return nullptr; } } /*****************************************************************************/ GenTree* Compiler::impArrayAccessIntrinsic( CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName) { /* If we are generating SMALL_CODE, we don't want to use intrinsics for the following, as it generates fatter code. */ if (compCodeOpt() == SMALL_CODE) { return nullptr; } /* These intrinsics generate fatter (but faster) code and are only done if we don't need SMALL_CODE */ unsigned rank = (intrinsicName == NI_Array_Set) ? (sig->numArgs - 1) : sig->numArgs; // The rank 1 case is special because it has to handle two array formats // we will simply not do that case if (rank > GT_ARR_MAX_RANK || rank <= 1) { return nullptr; } CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr; var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd)); // For the ref case, we will only be able to inline if the types match // (verifier checks for this, we don't care for the nonverified case and the // type is final (so we don't need to do the cast) if ((intrinsicName != NI_Array_Get) && !readonlyCall && varTypeIsGC(elemType)) { // Get the call site signature CORINFO_SIG_INFO LocalSig; eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig); assert(LocalSig.hasThis()); CORINFO_CLASS_HANDLE actualElemClsHnd; if (intrinsicName == NI_Array_Set) { // Fetch the last argument, the one that indicates the type we are setting. CORINFO_ARG_LIST_HANDLE argType = LocalSig.args; for (unsigned r = 0; r < rank; r++) { argType = info.compCompHnd->getArgNext(argType); } typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType); actualElemClsHnd = argInfo.GetClassHandle(); } else { assert(intrinsicName == NI_Array_Address); // Fetch the return type typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass); assert(retInfo.IsByRef()); actualElemClsHnd = retInfo.GetClassHandle(); } // if it's not final, we can't do the optimization if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL)) { return nullptr; } } unsigned arrayElemSize; if (elemType == TYP_STRUCT) { assert(arrElemClsHnd); arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd); } else { arrayElemSize = genTypeSize(elemType); } if ((unsigned char)arrayElemSize != arrayElemSize) { // arrayElemSize would be truncated as an unsigned char. // This means the array element is too large. Don't do the optimization. 
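        // (GenTreeArrElem stores the element size in a single byte, so anything larger than
        //  255 bytes cannot be represented and the access is left as a normal call.)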
return nullptr; } GenTree* val = nullptr; if (intrinsicName == NI_Array_Set) { // Assignment of a struct is more work, and there are more gets than sets. if (elemType == TYP_STRUCT) { return nullptr; } val = impPopStack().val; assert(genActualType(elemType) == genActualType(val->gtType) || (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) || (elemType == TYP_INT && val->gtType == TYP_BYREF) || (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT)); } noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK); GenTree* inds[GT_ARR_MAX_RANK]; for (unsigned k = rank; k > 0; k--) { inds[k - 1] = impPopStack().val; } GenTree* arr = impPopStack().val; assert(arr->gtType == TYP_REF); GenTree* arrElem = new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank), static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]); if (intrinsicName != NI_Array_Address) { if (varTypeIsStruct(elemType)) { arrElem = gtNewObjNode(sig->retTypeClass, arrElem); } else { arrElem = gtNewOperNode(GT_IND, elemType, arrElem); } } if (intrinsicName == NI_Array_Set) { assert(val != nullptr); return gtNewAssignNode(arrElem, val); } else { return arrElem; } } //------------------------------------------------------------------------ // impKeepAliveIntrinsic: Import the GC.KeepAlive intrinsic call // // Imports the intrinsic as a GT_KEEPALIVE node, and, as an optimization, // if the object to keep alive is a GT_BOX, removes its side effects and // uses the address of a local (copied from the box's source if needed) // as the operand for GT_KEEPALIVE. For the BOX optimization, if the class // of the box has no GC fields, a GT_NOP is returned. // // Arguments: // objToKeepAlive - the intrinisic call's argument // // Return Value: // The imported GT_KEEPALIVE or GT_NOP - see description. 
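// Notes:
//    The typical source pattern (C# sketch) is:
//
//        GC.KeepAlive(obj);
//
//    which must keep `obj` reported as live up to this point even though its value is
//    otherwise unused.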
// GenTree* Compiler::impKeepAliveIntrinsic(GenTree* objToKeepAlive) { assert(objToKeepAlive->TypeIs(TYP_REF)); if (opts.OptimizationEnabled() && objToKeepAlive->IsBoxedValue()) { CORINFO_CLASS_HANDLE boxedClass = lvaGetDesc(objToKeepAlive->AsBox()->BoxOp()->AsLclVar())->lvClassHnd; ClassLayout* layout = typGetObjLayout(boxedClass); if (!layout->HasGCPtr()) { gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_AND_NARROW); JITDUMP("\nBOX class has no GC fields, KEEPALIVE is a NOP"); return gtNewNothingNode(); } GenTree* boxSrc = gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_BUT_NOT_NARROW); if (boxSrc != nullptr) { unsigned boxTempNum; if (boxSrc->OperIs(GT_LCL_VAR)) { boxTempNum = boxSrc->AsLclVarCommon()->GetLclNum(); } else { boxTempNum = lvaGrabTemp(true DEBUGARG("Temp for the box source")); GenTree* boxTempAsg = gtNewTempAssign(boxTempNum, boxSrc); Statement* boxAsgStmt = objToKeepAlive->AsBox()->gtCopyStmtWhenInlinedBoxValue; boxAsgStmt->SetRootNode(boxTempAsg); } JITDUMP("\nImporting KEEPALIVE(BOX) as KEEPALIVE(ADDR(LCL_VAR V%02u))", boxTempNum); GenTree* boxTemp = gtNewLclvNode(boxTempNum, boxSrc->TypeGet()); GenTree* boxTempAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, boxTemp); return gtNewKeepAliveNode(boxTempAddr); } } return gtNewKeepAliveNode(objToKeepAlive); } bool Compiler::verMergeEntryStates(BasicBlock* block, bool* changed) { unsigned i; // do some basic checks first if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth) { return false; } if (verCurrentState.esStackDepth > 0) { // merge stack types StackEntry* parentStack = block->bbStackOnEntry(); StackEntry* childStack = verCurrentState.esStack; for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++) { if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == false) { return false; } } } // merge initialization status of this ptr if (verTrackObjCtorInitState) { // If we're tracking the CtorInitState, then it must not be unknown in the current state. assert(verCurrentState.thisInitialized != TIS_Bottom); // If the successor block's thisInit state is unknown, copy it from the current state. if (block->bbThisOnEntry() == TIS_Bottom) { *changed = true; verSetThisInit(block, verCurrentState.thisInitialized); } else if (verCurrentState.thisInitialized != block->bbThisOnEntry()) { if (block->bbThisOnEntry() != TIS_Top) { *changed = true; verSetThisInit(block, TIS_Top); if (block->bbFlags & BBF_FAILED_VERIFICATION) { // The block is bad. Control can flow through the block to any handler that catches the // verification exception, but the importer ignores bad blocks and therefore won't model // this flow in the normal way. To complete the merge into the bad block, the new state // needs to be manually pushed to the handlers that may be reached after the verification // exception occurs. // // Usually, the new state was already propagated to the relevant handlers while processing // the predecessors of the bad block. The exception is when the bad block is at the start // of a try region, meaning it is protected by additional handlers that do not protect its // predecessors. // if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0)) { // Push TIS_Top to the handlers that protect the bad block. Note that this can cause // recursive calls back into this code path (if successors of the current bad block are // also bad blocks). 
// ThisInitState origTIS = verCurrentState.thisInitialized; verCurrentState.thisInitialized = TIS_Top; impVerifyEHBlock(block, true); verCurrentState.thisInitialized = origTIS; } } } } } else { assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom); } return true; } /***************************************************************************** * 'logMsg' is true if a log message needs to be logged. false if the caller has * already logged it (presumably in a more detailed fashion than done here) */ void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) { block->bbJumpKind = BBJ_THROW; block->bbFlags |= BBF_FAILED_VERIFICATION; block->bbFlags &= ~BBF_IMPORTED; impCurStmtOffsSet(block->bbCodeOffs); // Clear the statement list as it exists so far; we're only going to have a verification exception. impStmtList = impLastStmt = nullptr; #ifdef DEBUG if (logMsg) { JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName, block->bbCodeOffs, block->bbCodeOffsEnd)); if (verbose) { printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs); } } if (JitConfig.DebugBreakOnVerificationFailure()) { DebugBreak(); } #endif impBeginTreeList(); // if the stack is non-empty evaluate all the side-effects if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); GenTree* op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewCallArgs(gtNewIconNode(block->bbCodeOffs))); // verCurrentState.esStackDepth = 0; impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // The inliner is not able to handle methods that require throw block, so // make sure this methods never gets inlined. 
info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE); } /***************************************************************************** * */ void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)) { verResetCurrentState(block, &verCurrentState); verConvertBBToThrowVerificationException(block DEBUGARG(logMsg)); #ifdef DEBUG impNoteLastILoffs(); // Remember at which BC offset the tree was finished #endif // DEBUG } /******************************************************************************/ typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd) { assert(ciType < CORINFO_TYPE_COUNT); typeInfo tiResult; switch (ciType) { case CORINFO_TYPE_STRING: case CORINFO_TYPE_CLASS: tiResult = verMakeTypeInfo(clsHnd); if (!tiResult.IsType(TI_REF)) { // type must be consistent with element type return typeInfo(); } break; #ifdef TARGET_64BIT case CORINFO_TYPE_NATIVEINT: case CORINFO_TYPE_NATIVEUINT: if (clsHnd) { // If we have more precise information, use it return verMakeTypeInfo(clsHnd); } else { return typeInfo::nativeInt(); } break; #endif // TARGET_64BIT case CORINFO_TYPE_VALUECLASS: case CORINFO_TYPE_REFANY: tiResult = verMakeTypeInfo(clsHnd); // type must be constant with element type; if (!tiResult.IsValueClass()) { return typeInfo(); } break; case CORINFO_TYPE_VAR: return verMakeTypeInfo(clsHnd); case CORINFO_TYPE_PTR: // for now, pointers are treated as an error case CORINFO_TYPE_VOID: return typeInfo(); break; case CORINFO_TYPE_BYREF: { CORINFO_CLASS_HANDLE childClassHandle; CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle); return ByRef(verMakeTypeInfo(childType, childClassHandle)); } break; default: if (clsHnd) { // If we have more precise information, use it return typeInfo(TI_STRUCT, clsHnd); } else { return typeInfo(JITtype2tiType(ciType)); } } return tiResult; } /******************************************************************************/ typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */) { if (clsHnd == nullptr) { return typeInfo(); } // Byrefs should only occur in method and local signatures, which are accessed // using ICorClassInfo and ICorClassInfo.getChildType. // So findClass() and getClassAttribs() should not be called for byrefs if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF) { assert(!"Did findClass() return a Byref?"); return typeInfo(); } unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd); if (attribs & CORINFO_FLG_VALUECLASS) { CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd); // Meta-data validation should ensure that CORINF_TYPE_BYREF should // not occur here, so we may want to change this to an assert instead. if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR) { return typeInfo(); } #ifdef TARGET_64BIT if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT) { return typeInfo::nativeInt(); } #endif // TARGET_64BIT if (t != CORINFO_TYPE_UNDEF) { return (typeInfo(JITtype2tiType(t))); } else if (bashStructToRef) { return (typeInfo(TI_REF, clsHnd)); } else { return (typeInfo(TI_STRUCT, clsHnd)); } } else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE) { // See comment in _typeInfo.h for why we do it this way. 
return (typeInfo(TI_REF, clsHnd, true)); } else { return (typeInfo(TI_REF, clsHnd)); } } /******************************************************************************/ bool Compiler::verIsSDArray(const typeInfo& ti) { if (ti.IsNullObjRef()) { // nulls are SD arrays return true; } if (!ti.IsType(TI_REF)) { return false; } if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef())) { return false; } return true; } /******************************************************************************/ /* Given 'arrayObjectType' which is an array type, fetch the element type. */ /* Returns an error type if anything goes wrong */ typeInfo Compiler::verGetArrayElemType(const typeInfo& arrayObjectType) { assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case if (!verIsSDArray(arrayObjectType)) { return typeInfo(); } CORINFO_CLASS_HANDLE childClassHandle = nullptr; CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle); return verMakeTypeInfo(ciType, childClassHandle); } /***************************************************************************** */ typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args) { CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle)); var_types type = JITtype2varType(ciType); if (varTypeIsGC(type)) { // For efficiency, getArgType only returns something in classHandle for // value types. For other types that have addition type info, you // have to call back explicitly classHandle = info.compCompHnd->getArgClass(sig, args); if (!classHandle) { NO_WAY("Could not figure out Class specified in argument or local signature"); } } return verMakeTypeInfo(ciType, classHandle); } bool Compiler::verIsByRefLike(const typeInfo& ti) { if (ti.IsByRef()) { return true; } if (!ti.IsType(TI_STRUCT)) { return false; } return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE; } bool Compiler::verIsSafeToReturnByRef(const typeInfo& ti) { if (ti.IsPermanentHomeByRef()) { return true; } else { return false; } } bool Compiler::verIsBoxable(const typeInfo& ti) { return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables || ti.IsUnboxedGenericTypeVar() || (ti.IsType(TI_STRUCT) && // exclude byreflike structs !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE))); } // Is it a boxed value type? bool Compiler::verIsBoxedValueType(const typeInfo& ti) { if (ti.GetType() == TI_REF) { CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef(); return !!eeIsValueClass(clsHnd); } else { return false; } } /***************************************************************************** * * Check if a TailCall is legal. */ bool Compiler::verCheckTailCallConstraint( OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter? bool speculative // If true, won't throw if verificatoin fails. Instead it will // return false to the caller. // If false, it will throw. 
) { DWORD mflags; CORINFO_SIG_INFO sig; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped CORINFO_METHOD_HANDLE methodHnd = nullptr; CORINFO_CLASS_HANDLE methodClassHnd = nullptr; unsigned methodClassFlgs = 0; assert(impOpcodeIsCallOpcode(opcode)); if (compIsForInlining()) { return false; } // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { /* Get the call sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; } else { methodHnd = pResolvedToken->hMethod; mflags = info.compCompHnd->getMethodAttribs(methodHnd); // When verifying generic code we pair the method handle with its // owning class to get the exact method signature. methodClassHnd = pResolvedToken->hClass; assert(methodClassHnd); eeGetMethodSig(methodHnd, &sig, methodClassHnd); // opcode specific check methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd); } // We must have got the methodClassHnd if opcode is not CEE_CALLI assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI); if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } // check compatibility of the arguments unsigned int argCount; argCount = sig.numArgs; CORINFO_ARG_LIST_HANDLE args; args = sig.args; while (argCount--) { typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack(); // check that the argument is not a byref for tailcalls VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative); // For unsafe code, we might have parameters containing pointer to the stack location. // Disallow the tailcall for this kind. CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle)); VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative); args = info.compCompHnd->getArgNext(args); } // update popCount popCount += sig.numArgs; // check for 'this' which is on non-static methods, not called via NEWOBJ if (!(mflags & CORINFO_FLG_STATIC)) { // Always update the popCount. // This is crucial for the stack calculation to be correct. typeInfo tiThis = impStackTop(popCount).seTypeInfo; popCount++; if (opcode == CEE_CALLI) { // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object // on the stack. 
if (tiThis.IsValueClass()) { tiThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative); } else { // Check type compatibility of the this argument typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative); } } // Tail calls on constrained calls should be illegal too: // when instantiated at a value type, a constrained call may pass the address of a stack allocated value VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative); // Get the exact view of the signature for an array method if (sig.retType != CORINFO_TYPE_VOID) { if (methodClassFlgs & CORINFO_FLG_ARRAY) { assert(opcode != CEE_CALLI); eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } } typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass); typeInfo tiCallerRetType = verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass); // void return type gets morphed into the error type, so we have to treat them specially here if (sig.retType == CORINFO_TYPE_VOID) { VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch", speculative); } else { VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType), NormaliseForStack(tiCallerRetType), true), "tailcall return mismatch", speculative); } // for tailcall, stack must be empty VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative); return true; // Yes, tailcall is legal } /***************************************************************************** * * Checks the IL verification rules for the call */ void Compiler::verVerifyCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, bool tailCall, bool readonlyCall, const BYTE* delegateCreateStart, const BYTE* codeAddr, CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName)) { DWORD mflags; CORINFO_SIG_INFO* sig = nullptr; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { Verify(false, "Calli not verifiable"); return; } //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item. 
mflags = callInfo->verMethodFlags; sig = &callInfo->verSig; if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); } // opcode specific check unsigned methodClassFlgs = callInfo->classFlags; switch (opcode) { case CEE_CALLVIRT: // cannot do callvirt on valuetypes VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class"); VerifyOrReturn(sig->hasThis(), "CallVirt on static method"); break; case CEE_NEWOBJ: { assert(!tailCall); // Importer should not allow this VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC), "newobj must be on instance"); if (methodClassFlgs & CORINFO_FLG_DELEGATE) { VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor"); typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack(); typeInfo tiDeclaredFtn = verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack(); VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type"); assert(popCount == 0); typeInfo tiActualObj = impStackTop(1).seTypeInfo; typeInfo tiActualFtn = impStackTop(0).seTypeInfo; VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg"); VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch"); VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF), "delegate object type mismatch"); CORINFO_CLASS_HANDLE objTypeHandle = tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef(); // the method signature must be compatible with the delegate's invoke method // check that for virtual functions, the type of the object used to get the // ftn ptr is the same as the type of the object passed to the delegate ctor. // since this is a bit of work to determine in general, we pattern match stylized // code sequences // the delegate creation code check, which used to be done later, is now done here // so we can read delegateMethodRef directly from // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence; // we then use it in our call to isCompatibleDelegate(). 
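// The two stylized sequences accepted (see verCheckDelegateCreation below), sketched
// with hypothetical types:
//
//     ldftn      SomeClass::Target
//     newobj     instance void SomeDelegate::.ctor(object, native int)
//
//     dup
//     ldvirtftn  SomeClass::VirtualTarget
//     newobj     instance void SomeDelegate::.ctor(object, native int)
//
// The token following ldftn/ldvirtftn is the delegateMethodRef read out below.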
mdMemberRef delegateMethodRef = mdMemberRefNil; VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef), "must create delegates with certain IL"); CORINFO_RESOLVED_TOKEN delegateResolvedToken; delegateResolvedToken.tokenContext = impTokenLookupContextHandle; delegateResolvedToken.tokenScope = info.compScopeHnd; delegateResolvedToken.token = delegateMethodRef; delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method; info.compCompHnd->resolveToken(&delegateResolvedToken); CORINFO_CALL_INFO delegateCallInfo; eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */, CORINFO_CALLINFO_SECURITYCHECKS, &delegateCallInfo); bool isOpenDelegate = false; VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass, tiActualFtn.GetMethod(), pResolvedToken->hClass, &isOpenDelegate), "function incompatible with delegate"); // check the constraints on the target method VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass), "delegate target has unsatisfied class constraints"); VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass, tiActualFtn.GetMethod()), "delegate target has unsatisfied method constraints"); // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch) // for additional verification rules for delegates CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod(); DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle); if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)) { VerifyOrReturn((tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiActualObj), "The 'this' parameter to the call must be either the calling method's " "'this' parameter or " "a boxed value type."); } } if (actualMethodAttribs & CORINFO_FLG_PROTECTED) { bool targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC; Verify(targetIsStatic || !isOpenDelegate, "Unverifiable creation of an open instance delegate for a protected member."); CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic) ? info.compClassHnd : tiActualObj.GetClassHandleForObjRef(); // In the case of protected methods, it is a requirement that the 'this' // pointer be a subclass of the current context. Perform this check. 
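// For example (hypothetical types): if Base declares a protected method M and the
// current context is Derived : Base, a delegate over M may only be bound to a receiver
// statically typed as Derived (or a subtype); binding it to an arbitrary Base instance
// would let Derived call protected members of unrelated Base objects.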
Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd), "Accessing protected method through wrong type."); } goto DONE_ARGS; } } // fall thru to default checks FALLTHROUGH; default: VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract"); } VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)), "can only newobj a delegate constructor"); // check compatibility of the arguments unsigned int argCount; argCount = sig->numArgs; CORINFO_ARG_LIST_HANDLE args; args = sig->args; while (argCount--) { typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo; typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack(); VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch"); args = info.compCompHnd->getArgNext(args); } DONE_ARGS: // update popCount popCount += sig->numArgs; // check for 'this' which are is non-static methods, not called via NEWOBJ CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd; if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ)) { typeInfo tiThis = impStackTop(popCount).seTypeInfo; popCount++; // If it is null, we assume we can access it (since it will AV shortly) // If it is anything but a reference class, there is no hierarchy, so // again, we don't need the precise instance class to compute 'protected' access if (tiThis.IsType(TI_REF)) { instanceClassHnd = tiThis.GetClassHandleForObjRef(); } // Check type compatibility of the this argument typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); } // If this is a call to the base class .ctor, set thisPtr Init for // this block. if (mflags & CORINFO_FLG_CONSTRUCTOR) { if (verTrackObjCtorInitState && tiThis.IsThisPtr() && verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass)) { assert(verCurrentState.thisInitialized != TIS_Bottom); // This should never be the case just from the logic of the verifier. VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit, "Call to base class constructor when 'this' is possibly initialized"); // Otherwise, 'this' is now initialized. verCurrentState.thisInitialized = TIS_Init; tiThis.SetInitialisedObjRef(); } else { // We allow direct calls to value type constructors // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a // constrained callvirt to illegally re-enter a .ctor on a value of reference type. 
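// The allowed shape is a managed pointer to a value type, e.g. (sketch):
//
//     ldloca.s   V_0                                    // V_0 is a struct local
//     call       instance void SomeStruct::.ctor(int32)
//
// whereas a byref whose target is a reference type would let arbitrary code re-run an
// object's constructor, which the check below rejects.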
VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(), "Bad call to a constructor"); } } if (pConstrainedResolvedToken != nullptr) { VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call"); typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass); // We just dereference this and test for equality tiThis.DereferenceByRef(); VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint), "this type mismatch with constrained type operand"); // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass); } // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef()) { tiDeclaredThis.SetIsReadonlyByRef(); } VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch"); if (tiThis.IsByRef()) { // Find the actual type where the method exists (as opposed to what is declared // in the metadata). This is to prevent passing a byref as the "this" argument // while calling methods like System.ValueType.GetHashCode() which expect boxed objects. CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod); VerifyOrReturn(eeIsValueClass(actualClassHnd), "Call to base type of valuetype (which is never a valuetype)"); } // Rules for non-virtual call to a non-final virtual method: // Define: // The "this" pointer is considered to be "possibly written" if // 1. Its address have been taken (LDARGA 0) anywhere in the method. // (or) // 2. It has been stored to (STARG.0) anywhere in the method. // A non-virtual call to a non-final virtual method is only allowed if // 1. The this pointer passed to the callee is an instance of a boxed value type. // (or) // 2. The this pointer passed to the callee is the current method's this pointer. // (and) The current method's this pointer is not "possibly written". // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to // virtual methods. (Luckily this does affect .ctors, since they are not virtual). // This is stronger that is strictly needed, but implementing a laxer rule is significantly // hard and more error prone. if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)) { VerifyOrReturn((tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiThis), "The 'this' parameter to the call must be either the calling method's 'this' parameter or " "a boxed value type."); } } // check any constraints on the callee's class and type parameters VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass), "method has unsatisfied class constraints"); VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod), "method has unsatisfied method constraints"); if (mflags & CORINFO_FLG_PROTECTED) { VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd), "Can't access protected method"); } // Get the exact view of the signature for an array method if (sig->retType != CORINFO_TYPE_VOID) { eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass); } // "readonly." prefixed calls only allowed for the Address operation on arrays. // The methods supported by array types are under the control of the EE // so we can trust that only the Address operation returns a byref. 
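// Sketch of the only accepted use (ILDASM-style, hypothetical array type):
//
//     readonly.
//     call       instance int32& int32[0..., 0...]::Address(int32, int32)
//
// i.e. the callee must be an array method (CORINFO_FLG_ARRAY) returning a byref, and
// the resulting byref is treated as read-only.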
if (readonlyCall) { typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass); VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(), "unexpected use of readonly prefix"); } // Verify the tailcall if (tailCall) { verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false); } } /***************************************************************************** * Checks that a delegate creation is done using the following pattern: * dup * ldvirtftn targetMemberRef * OR * ldftn targetMemberRef * * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if * not in this basic block) * * targetMemberRef is read from the code sequence. * targetMemberRef is validated iff verificationNeeded. */ bool Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef) { if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]); return true; } else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]); return true; } return false; } typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType) { Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref"); typeInfo ptrVal = verVerifyLDIND(tiTo, instrType); typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack(); if (!tiCompatibleWith(value, normPtrVal, true)) { Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch"); } return ptrVal; } typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType) { assert(!instrType.IsStruct()); typeInfo ptrVal; if (ptr.IsByRef()) { ptrVal = DereferenceByRef(ptr); if (instrType.IsObjRef() && !ptrVal.IsObjRef()) { Verify(false, "bad pointer"); } else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal)) { Verify(false, "pointer not consistent with instr"); } } else { Verify(false, "pointer not byref"); } return ptrVal; } // Verify that the field is used properly. 'tiThis' is NULL for statics, // 'fieldFlags' is the fields attributes, and mutator is true if it is a // ld*flda or a st*fld. // 'enclosingClass' is given if we are accessing a field in some specific type. void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, bool mutator, bool allowPlainStructAsThis) { CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass; unsigned fieldFlags = fieldInfo.fieldFlags; CORINFO_CLASS_HANDLE instanceClass = info.compClassHnd; // for statics, we imagine the instance is the current class. 
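// The initonly rule enforced just below, sketched for a hypothetical initonly field C::x:
//
//     stfld int32 C::x   inside C::.ctor (or C::.cctor for a static x)  -> allowed
//     stfld int32 C::x   anywhere else                                  -> "bad use of initonly field"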
bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0); if (mutator) { Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA bases static"); if ((fieldFlags & CORINFO_FLG_FIELD_FINAL)) { Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd && info.compIsStatic == isStaticField, "bad use of initonly field (set or address taken)"); } } if (tiThis == nullptr) { Verify(isStaticField, "used static opcode with non-static field"); } else { typeInfo tThis = *tiThis; if (allowPlainStructAsThis && tThis.IsValueClass()) { tThis.MakeByRef(); } // If it is null, we assume we can access it (since it will AV shortly) // If it is anything but a refernce class, there is no hierarchy, so // again, we don't need the precise instance class to compute 'protected' access if (tiThis->IsType(TI_REF)) { instanceClass = tiThis->GetClassHandleForObjRef(); } // Note that even if the field is static, we require that the this pointer // satisfy the same constraints as a non-static field This happens to // be simpler and seems reasonable typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); // we allow read-only tThis, on any field access (even stores!), because if the // class implementor wants to prohibit stores he should make the field private. // we do this by setting the read-only bit on the type we compare tThis to. tiDeclaredThis.SetIsReadonlyByRef(); } else if (verTrackObjCtorInitState && tThis.IsThisPtr()) { // Any field access is legal on "uninitialized" this pointers. // The easiest way to implement this is to simply set the // initialized bit for the duration of the type check on the // field access only. It does not change the state of the "this" // for the function as a whole. Note that the "tThis" is a copy // of the original "this" type (*tiThis) passed in. tThis.SetInitialisedObjRef(); } Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch"); } // Presently the JIT does not check that we don't store or take the address of init-only fields // since we cannot guarantee their immutability and it is not a security issue. // check any constraints on the fields's class --- accessing the field might cause a class constructor to run. VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass), "field has unsatisfied class constraints"); if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED) { Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass), "Accessing protected method through wrong type."); } } void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode) { if (tiOp1.IsNumberType()) { #ifdef TARGET_64BIT Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch"); #else // TARGET_64BIT // [10/17/2013] Consider changing this: to put on my verification lawyer hat, // this is non-conforming to the ECMA Spec: types don't have to be equivalent, // but compatible, since we can coalesce native int with int32 (see section III.1.5). 
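// Concretely (sketch): a comparison whose operands are int32 and native int, e.g.
//
//     ldloc.0      // int32
//     ldloc.1      // native int
//     ceq
//
// is compatible per the ECMA tables, but the equivalence check below rejects it on
// 32-bit targets, while the 64-bit path above accepts it via tiCompatibleWith.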
Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch"); #endif // !TARGET_64BIT } else if (tiOp1.IsObjRef()) { switch (opcode) { case CEE_BEQ_S: case CEE_BEQ: case CEE_BNE_UN_S: case CEE_BNE_UN: case CEE_CEQ: case CEE_CGT_UN: break; default: Verify(false, "Cond not allowed on object types"); } Verify(tiOp2.IsObjRef(), "Cond type mismatch"); } else if (tiOp1.IsByRef()) { Verify(tiOp2.IsByRef(), "Cond type mismatch"); } else { Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch"); } } void Compiler::verVerifyThisPtrInitialised() { if (verTrackObjCtorInitState) { Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized"); } } bool Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target) { // Either target == context, in this case calling an alternate .ctor // Or target is the immediate parent of context return ((target == context) || (target == info.compCompHnd->getParentType(context))); } GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } // CoreRT generic virtual method if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* runtimeMethodHandle = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_METHOD_HDL, pCallInfo->hMethod); return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL, gtNewCallArgs(thisPtr, runtimeMethodHandle)); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { if (!pCallInfo->exactContextNeedsRuntimeLookup) { GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewCallArgs(thisPtr)); call->setEntryPoint(pCallInfo->codePointerLookup.constLookup); return call; } // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too. if (IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind); return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pCallInfo->codePointerLookup.lookupKind); } } #endif // Get the exact descriptor for the static callsite GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken); if (exactTypeDesc == nullptr) { // compDonotInline() return nullptr; } GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken); if (exactMethodDesc == nullptr) { // compDonotInline() return nullptr; } GenTreeCall::Use* helpArgs = gtNewCallArgs(exactMethodDesc); helpArgs = gtPrependNewCallArg(exactTypeDesc, helpArgs); helpArgs = gtPrependNewCallArg(thisPtr, helpArgs); // Call helper function. This gets the target address of the final destination callsite. return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs); } //------------------------------------------------------------------------ // impBoxPatternMatch: match and import common box idioms // // Arguments: // pResolvedToken - resolved token from the box operation // codeAddr - position in IL stream after the box instruction // codeEndp - end of IL stream // // Return Value: // Number of IL bytes matched and imported, -1 otherwise // // Notes: // pResolvedToken is known to be a value type; ref type boxing // is handled in the CEE_BOX clause. 
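//
//   The idioms recognized below, sketched as IL:
//
//     box T; unbox.any T                    -> no-op
//     box T; brtrue/brfalse L               -> constant (plus a null check when needed)
//     box T; isinst U; brtrue/brfalse L     -> constant, or a Nullable<T>.hasValue load,
//                                              when the cast result is statically known
//     box T; isinst T; unbox.any T          -> no-op
//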
int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation) { if (codeAddr >= codeEndp) { return -1; } switch (codeAddr[0]) { case CEE_UNBOX_ANY: // box + unbox.any if (codeAddr + 1 + sizeof(mdToken) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } CORINFO_RESOLVED_TOKEN unboxResolvedToken; impResolveToken(codeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // See if the resolved tokens describe types that are equal. const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass); // If so, box/unbox.any is a nop. if (compare == TypeCompareState::Must) { JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n"); // Skip the next unbox.any instruction return 1 + sizeof(mdToken); } } break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: // box + br_true/false if ((codeAddr + ((codeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 0; } GenTree* const treeToBox = impStackTop().val; bool canOptimize = true; GenTree* treeToNullcheck = nullptr; // Can the thing being boxed cause a side effect? if ((treeToBox->gtFlags & GTF_SIDE_EFFECT) != 0) { // Is this a side effect we can replicate cheaply? if (((treeToBox->gtFlags & GTF_SIDE_EFFECT) == GTF_EXCEPT) && treeToBox->OperIs(GT_OBJ, GT_BLK, GT_IND)) { // Yes, we just need to perform a null check if needed. GenTree* const addr = treeToBox->AsOp()->gtGetOp1(); if (fgAddrCouldBeNull(addr)) { treeToNullcheck = addr; } } else { canOptimize = false; } } if (canOptimize) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { JITDUMP("\n Importing BOX; BR_TRUE/FALSE as %sconstant\n", treeToNullcheck == nullptr ? "" : "nullcheck+"); impPopStack(); GenTree* result = gtNewIconNode(1); if (treeToNullcheck != nullptr) { GenTree* nullcheck = gtNewNullCheck(treeToNullcheck, compCurBB); result = gtNewOperNode(GT_COMMA, TYP_INT, nullcheck, result); } impPushOnStack(result, typeInfo(TI_INT)); return 0; } } } break; case CEE_ISINST: if (codeAddr + 1 + sizeof(mdToken) + 1 <= codeEndp) { const BYTE* nextCodeAddr = codeAddr + 1 + sizeof(mdToken); switch (nextCodeAddr[0]) { // box + isinst + br_true/false case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: if ((nextCodeAddr + ((nextCodeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } if (!(impStackTop().val->gtFlags & GTF_SIDE_EFFECT)) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(pResolvedToken->hClass, isInstResolvedToken.hClass); if (castResult != TypeCompareState::May) { JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant\n"); impPopStack(); impPushOnStack(gtNewIconNode((castResult == TypeCompareState::Must) ? 
1 : 0), typeInfo(TI_INT)); // Skip the next isinst instruction return 1 + sizeof(mdToken); } } else if (boxHelper == CORINFO_HELP_BOX_NULLABLE) { // For nullable we're going to fold it to "ldfld hasValue + brtrue/brfalse" or // "ldc.i4.0 + brtrue/brfalse" in case if the underlying type is not castable to // the target type. CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); CORINFO_CLASS_HANDLE nullableCls = pResolvedToken->hClass; CORINFO_CLASS_HANDLE underlyingCls = info.compCompHnd->getTypeForBox(nullableCls); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(underlyingCls, isInstResolvedToken.hClass); if (castResult == TypeCompareState::Must) { const CORINFO_FIELD_HANDLE hasValueFldHnd = info.compCompHnd->getFieldInClass(nullableCls, 0); assert(info.compCompHnd->getFieldOffset(hasValueFldHnd) == 0); assert(!strcmp(info.compCompHnd->getFieldName(hasValueFldHnd, nullptr), "hasValue")); GenTree* objToBox = impPopStack().val; // Spill struct to get its address (to access hasValue field) objToBox = impGetStructAddr(objToBox, nullableCls, (unsigned)CHECK_SPILL_ALL, true); impPushOnStack(gtNewFieldRef(TYP_BOOL, hasValueFldHnd, objToBox, 0), typeInfo(TI_INT)); JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as nullableVT.hasValue\n"); return 1 + sizeof(mdToken); } else if (castResult == TypeCompareState::MustNot) { impPopStack(); impPushOnStack(gtNewIconNode(0), typeInfo(TI_INT)); JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant (false)\n"); return 1 + sizeof(mdToken); } } } } break; // box + isinst + unbox.any case CEE_UNBOX_ANY: if ((nextCodeAddr + 1 + sizeof(mdToken)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 2 + sizeof(mdToken) * 2; } // See if the resolved tokens in box, isinst and unbox.any describe types that are equal. CORINFO_RESOLVED_TOKEN isinstResolvedToken = {}; impResolveToken(codeAddr + 1, &isinstResolvedToken, CORINFO_TOKENKIND_Class); if (info.compCompHnd->compareTypesForEquality(isinstResolvedToken.hClass, pResolvedToken->hClass) == TypeCompareState::Must) { CORINFO_RESOLVED_TOKEN unboxResolvedToken = {}; impResolveToken(nextCodeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // If so, box + isinst + unbox.any is a nop. if (info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass) == TypeCompareState::Must) { JITDUMP("\n Importing BOX; ISINST, UNBOX.ANY as NOP\n"); return 2 + sizeof(mdToken) * 2; } } } break; } } break; default: break; } return -1; } //------------------------------------------------------------------------ // impImportAndPushBox: build and import a value-type box // // Arguments: // pResolvedToken - resolved token from the box operation // // Return Value: // None. // // Side Effects: // The value to be boxed is popped from the stack, and a tree for // the boxed value is pushed. This method may create upstream // statements, spill side effecting trees, and create new temps. // // If importing an inlinee, we may also discover the inline must // fail. If so there is no new value pushed on the stack. Callers // should use CompDoNotInline after calling this method to see if // ongoing importation should be aborted. // // Notes: // Boxing of ref classes results in the same value as the value on // the top of the stack, so is handled inline in impImportBlockCode // for the CEE_BOX case. Only value or primitive type boxes make it // here. 
// // Boxing for nullable types is done via a helper call; boxing // of other value types is expanded inline or handled via helper // call, depending on the jit's codegen mode. // // When the jit is operating in size and time constrained modes, // using a helper call here can save jit time and code size. But it // also may inhibit cleanup optimizations that could have also had a // even greater benefit effect on code size and jit time. An optimal // strategy may need to peek ahead and see if it is easy to tell how // the box is being used. For now, we defer. void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) { // Spill any special side effects impSpillSpecialSideEff(); // Get get the expression to box from the stack. GenTree* op1 = nullptr; GenTree* op2 = nullptr; StackEntry se = impPopStack(); CORINFO_CLASS_HANDLE operCls = se.seTypeInfo.GetClassHandle(); GenTree* exprToBox = se.val; // Look at what helper we should use. CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); // Determine what expansion to prefer. // // In size/time/debuggable constrained modes, the helper call // expansion for box is generally smaller and is preferred, unless // the value to box is a struct that comes from a call. In that // case the call can construct its return value directly into the // box payload, saving possibly some up-front zeroing. // // Currently primitive type boxes always get inline expanded. We may // want to do the same for small structs if they don't come from // calls and don't have GC pointers, since explicitly copying such // structs is cheap. JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via"); bool canExpandInline = (boxHelper == CORINFO_HELP_BOX); bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled(); bool expandInline = canExpandInline && !optForSize; if (expandInline) { JITDUMP(" inline allocate/copy sequence\n"); // we are doing 'normal' boxing. This means that we can inline the box operation // Box(expr) gets morphed into // temp = new(clsHnd) // cpobj(temp+4, expr, clsHnd) // push temp // The code paths differ slightly below for structs and primitives because // "cpobj" differs in these cases. In one case you get // impAssignStructPtr(temp+4, expr, clsHnd) // and the other you get // *(temp+4) = expr if (opts.OptimizationDisabled()) { // For minopts/debug code, try and minimize the total number // of box temps by reusing an existing temp when possible. if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM) { impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper")); } } else { // When optimizing, use a new temp for each box operation // since we then know the exact class of the box temp. impBoxTemp = lvaGrabTemp(true DEBUGARG("Single-def Box Helper")); lvaTable[impBoxTemp].lvType = TYP_REF; lvaTable[impBoxTemp].lvSingleDef = 1; JITDUMP("Marking V%02u as a single def local\n", impBoxTemp); const bool isExact = true; lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact); } // needs to stay in use until this box expression is appended // some other node. We approximate this by keeping it alive until // the opcode stack becomes empty impBoxTempInUse = true; // Remember the current last statement in case we need to move // a range of statements to ensure the box temp is initialized // before it's used. 
// Statement* const cursor = impLastStmt; const bool useParent = false; op1 = gtNewAllocObjNode(pResolvedToken, useParent); if (op1 == nullptr) { // If we fail to create the newobj node, we must be inlining // and have run across a type we can't describe. // assert(compDonotInline()); return; } // Remember that this basic block contains 'new' of an object, // and so does this method // compCurBB->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Assign the boxed object to the box temp. // GenTree* asg = gtNewTempAssign(impBoxTemp, op1); Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // If the exprToBox is a call that returns its value via a ret buf arg, // move the assignment statement(s) before the call (which must be a top level tree). // // We do this because impAssignStructPtr (invoked below) will // back-substitute into a call when it sees a GT_RET_EXPR and the call // has a hidden buffer pointer, So we need to reorder things to avoid // creating out-of-sequence IR. // if (varTypeIsStruct(exprToBox) && exprToBox->OperIs(GT_RET_EXPR)) { GenTreeCall* const call = exprToBox->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->HasRetBufArg()) { JITDUMP("Must insert newobj stmts for box before call [%06u]\n", dspTreeID(call)); // Walk back through the statements in this block, looking for the one // that has this call as the root node. // // Because gtNewTempAssign (above) may have added statements that // feed into the actual assignment we need to move this set of added // statements as a group. // // Note boxed allocations are side-effect free (no com or finalizer) so // our only worries here are (correctness) not overlapping the box temp // lifetime and (perf) stretching the temp lifetime across the inlinee // body. // // Since this is an inline candidate, we must be optimizing, and so we have // a unique box temp per call. So no worries about overlap. // assert(!opts.OptimizationDisabled()); // Lifetime stretching could addressed with some extra cleverness--sinking // the allocation back down to just before the copy, once we figure out // where the copy is. We defer for now. // Statement* insertBeforeStmt = cursor; noway_assert(insertBeforeStmt != nullptr); while (true) { if (insertBeforeStmt->GetRootNode() == call) { break; } // If we've searched all the statements in the block and failed to // find the call, then something's wrong. // noway_assert(insertBeforeStmt != impStmtList); insertBeforeStmt = insertBeforeStmt->GetPrevStmt(); } // Found the call. Move the statements comprising the assignment. // JITDUMP("Moving " FMT_STMT "..." FMT_STMT " before " FMT_STMT "\n", cursor->GetNextStmt()->GetID(), asgStmt->GetID(), insertBeforeStmt->GetID()); assert(asgStmt == impLastStmt); do { Statement* movingStmt = impExtractLastStmt(); impInsertStmtBefore(movingStmt, insertBeforeStmt); insertBeforeStmt = movingStmt; } while (impLastStmt != cursor); } } // Create a pointer to the box payload in op1. // op1 = gtNewLclvNode(impBoxTemp, TYP_REF); op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2); // Copy from the exprToBox to the box payload. 
// if (varTypeIsStruct(exprToBox)) { assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls)); op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL); } else { var_types lclTyp = exprToBox->TypeGet(); if (lclTyp == TYP_BYREF) { lclTyp = TYP_I_IMPL; } CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass); if (impIsPrimitive(jitType)) { lclTyp = JITtype2varType(jitType); } var_types srcTyp = exprToBox->TypeGet(); var_types dstTyp = lclTyp; // We allow float <-> double mismatches and implicit truncation for small types. assert((genActualType(srcTyp) == genActualType(dstTyp)) || (varTypeIsFloating(srcTyp) == varTypeIsFloating(dstTyp))); // Note regarding small types. // We are going to store to the box here via an indirection, so the cast added below is // redundant, since the store has an implicit truncation semantic. The reason we still // add this cast is so that the code which deals with GT_BOX optimizations does not have // to account for this implicit truncation (e. g. understand that BOX<byte>(0xFF + 1) is // actually BOX<byte>(0) or deal with signedness mismatch and other GT_CAST complexities). if (srcTyp != dstTyp) { exprToBox = gtNewCastNode(genActualType(dstTyp), exprToBox, false, dstTyp); } op1 = gtNewAssignNode(gtNewOperNode(GT_IND, dstTyp, op1), exprToBox); } // Spill eval stack to flush out any pending side effects. impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox")); // Set up this copy as a second assignment. Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); op1 = gtNewLclvNode(impBoxTemp, TYP_REF); // Record that this is a "box" node and keep track of the matching parts. op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt); // If it is a value class, mark the "box" node. We can use this information // to optimise several cases: // "box(x) == null" --> false // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod" // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod" op1->gtFlags |= GTF_BOX_VALUE; assert(op1->IsBoxedValue()); assert(asg->gtOper == GT_ASG); } else { // Don't optimize, just call the helper and be done with it. JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable"); assert(operCls != nullptr); // Ensure that the value class is restored op2 = impTokenToHandle(pResolvedToken, nullptr, true /* mustRestoreHandle */); if (op2 == nullptr) { // We must be backing out of an inline. assert(compDonotInline()); return; } GenTreeCall::Use* args = gtNewCallArgs(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true)); op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args); } /* Push the result back on the stack, */ /* even if clsHnd is a value class we want the TI_REF */ typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass)); impPushOnStack(op1, tiRetVal); } //------------------------------------------------------------------------ // impImportNewObjArray: Build and import `new` of multi-dimmensional array // // Arguments: // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized // by a call to CEEInfo::resolveToken(). // pCallInfo - The CORINFO_CALL_INFO that has been initialized // by a call to CEEInfo::getCallInfo(). // // Assumptions: // The multi-dimensional array constructor arguments (array dimensions) are // pushed on the IL stack on entry to this method. 
// // Notes: // Multi-dimensional array constructors are imported as calls to a JIT // helper, not as regular calls. void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken); if (classHandle == nullptr) { // compDonotInline() return; } assert(pCallInfo->sig.numArgs); GenTree* node; // Reuse the temp used to pass the array dimensions to avoid bloating // the stack frame in case there are multiple calls to multi-dim array // constructors within a single method. if (lvaNewObjArrayArgs == BAD_VAR_NUM) { lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs")); lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK; lvaTable[lvaNewObjArrayArgs].lvExactSize = 0; } // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers // for our call to CORINFO_HELP_NEW_MDARR. lvaTable[lvaNewObjArrayArgs].lvExactSize = max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32)); // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments // to one allocation at a time. impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray")); // // The arguments of the CORINFO_HELP_NEW_MDARR helper are: // - Array class handle // - Number of dimension arguments // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp. // node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK); node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node); // Pop dimension arguments from the stack one at a time and store it // into lvaNewObjArrayArgs temp. for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--) { GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT); GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK); dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest); dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest, new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i)); dest = gtNewOperNode(GT_IND, TYP_INT, dest); node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node); } GenTreeCall::Use* args = gtNewCallArgs(node); // pass number of arguments to the helper args = gtPrependNewCallArg(gtNewIconNode(pCallInfo->sig.numArgs), args); args = gtPrependNewCallArg(classHandle, args); node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args); for (GenTreeCall::Use& use : node->AsCall()->Args()) { node->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT; } node->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass; // Remember that this basic block contains 'new' of a md array compCurBB->bbFlags |= BBF_HAS_NEWARRAY; impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass)); } GenTree* Compiler::impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform) { switch (transform) { case CORINFO_DEREF_THIS: { GenTree* obj = thisPtr; // This does a LDIND on the obj, which should be a byref. 
pointing to a ref impBashVarAddrsToI(obj); assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF); CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass); obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj); // ldind could point anywhere, example a boxed class static int obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); return obj; } case CORINFO_BOX_THIS: { // Constraint calls where there might be no // unboxed entry point require us to implement the call via helper. // These only occur when a possible target of the call // may have inherited an implementation of an interface // method from System.Object or System.ValueType. The EE does not provide us with // "unboxed" versions of these methods. GenTree* obj = thisPtr; assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL); obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj); obj->gtFlags |= GTF_EXCEPT; CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass); if (impIsPrimitive(jitTyp)) { if (obj->OperIsBlk()) { obj->ChangeOperUnchecked(GT_IND); // Obj could point anywhere, example a boxed class static int obj->gtFlags |= GTF_IND_TGTANYWHERE; obj->AsOp()->gtOp2 = nullptr; // must be zero for tree walkers } obj->gtType = JITtype2varType(jitTyp); assert(varTypeIsArithmetic(obj->gtType)); } // This pushes on the dereferenced byref // This is then used immediately to box. impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack()); // This pops off the byref-to-a-value-type remaining on the stack and // replaces it with a boxed object. // This is then used as the object to the virtual call immediately below. impImportAndPushBox(pConstrainedResolvedToken); if (compDonotInline()) { return nullptr; } obj = impPopStack().val; return obj; } case CORINFO_NO_THIS_TRANSFORM: default: return thisPtr; } } //------------------------------------------------------------------------ // impCanPInvokeInline: check whether PInvoke inlining should enabled in current method. // // Return Value: // true if PInvoke inlining should be enabled in current method, false otherwise // // Notes: // Checks a number of ambient conditions where we could pinvoke but choose not to bool Compiler::impCanPInvokeInline() { return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke ; } //------------------------------------------------------------------------ // impCanPInvokeInlineCallSite: basic legality checks using information // from a call to see if the call qualifies as an inline pinvoke. // // Arguments: // block - block contaning the call, or for inlinees, block // containing the call being inlined // // Return Value: // true if this call can legally qualify as an inline pinvoke, false otherwise // // Notes: // For runtimes that support exception handling interop there are // restrictions on using inline pinvoke in handler regions. // // * We have to disable pinvoke inlining inside of filters because // in case the main execution (i.e. 
in the try block) is inside // unmanaged code, we cannot reuse the inlined stub (we still need // the original state until we are in the catch handler) // // * We disable pinvoke inlining inside handlers since the GSCookie // is in the inlined Frame (see // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but // this would not protect framelets/return-address of handlers. // // These restrictions are currently also in place for CoreCLR but // can be relaxed when coreclr/#8459 is addressed. bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block) { if (block->hasHndIndex()) { return false; } // The remaining limitations do not apply to CoreRT if (IsTargetAbi(CORINFO_CORERT_ABI)) { return true; } #ifdef TARGET_64BIT // On 64-bit platforms, we disable pinvoke inlining inside of try regions. // Note that this could be needed on other architectures too, but we // haven't done enough investigation to know for sure at this point. // // Here is the comment from JIT64 explaining why: // [VSWhidbey: 611015] - because the jitted code links in the // Frame (instead of the stub) we rely on the Frame not being // 'active' until inside the stub. This normally happens by the // stub setting the return address pointer in the Frame object // inside the stub. On a normal return, the return address // pointer is zeroed out so the Frame can be safely re-used, but // if an exception occurs, nobody zeros out the return address // pointer. Thus if we re-used the Frame object, it would go // 'active' as soon as we link it into the Frame chain. // // Technically we only need to disable PInvoke inlining if we're // in a handler or if we're in a try body with a catch or // filter/except where other non-handler code in this method // might run and try to re-use the dirty Frame object. // // A desktop test case where this seems to matter is // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe if (block->hasTryIndex()) { // This does not apply to the raw pinvoke call that is inside the pinvoke // ILStub. In this case, we have to inline the raw pinvoke call into the stub, // otherwise we would end up with a stub that recursively calls itself, and end // up with a stack overflow. if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers()) { return true; } return false; } #endif // TARGET_64BIT return true; } //------------------------------------------------------------------------ // impCheckForPInvokeCall examine call to see if it is a pinvoke and if so // if it can be expressed as an inline pinvoke. // // Arguments: // call - tree for the call // methHnd - handle for the method being called (may be null) // sig - signature of the method being called // mflags - method flags for the method being called // block - block contaning the call, or for inlinees, block // containing the call being inlined // // Notes: // Sets GTF_CALL_M_PINVOKE on the call for pinvokes. // // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the // call passes a combination of legality and profitabilty checks. 
// // If GTF_CALL_UNMANAGED is set, increments info.compUnmanagedCallCountWithGCTransition void Compiler::impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block) { CorInfoCallConvExtension unmanagedCallConv; // If VM flagged it as Pinvoke, flag the call node accordingly if ((mflags & CORINFO_FLG_PINVOKE) != 0) { call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE; } bool suppressGCTransition = false; if (methHnd) { if ((mflags & CORINFO_FLG_PINVOKE) == 0) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd, nullptr, &suppressGCTransition); } else { if (sig->getCallConv() == CORINFO_CALLCONV_DEFAULT || sig->getCallConv() == CORINFO_CALLCONV_VARARG) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(nullptr, sig, &suppressGCTransition); assert(!call->gtCallCookie); } if (suppressGCTransition) { call->gtCallMoreFlags |= GTF_CALL_M_SUPPRESS_GC_TRANSITION; } // If we can't get the unmanaged calling convention or the calling convention is unsupported in the JIT, // return here without inlining the native call. if (unmanagedCallConv == CorInfoCallConvExtension::Managed || unmanagedCallConv == CorInfoCallConvExtension::Fastcall || unmanagedCallConv == CorInfoCallConvExtension::FastcallMemberFunction) { return; } optNativeCallCount++; if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI))) { // PInvoke in CoreRT ABI must be always inlined. Non-inlineable CALLI cases have been // converted to regular method calls earlier using convertPInvokeCalliToCall. // PInvoke CALLI in IL stubs must be inlined } else { // Check legality if (!impCanPInvokeInlineCallSite(block)) { return; } // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive // inlining in CoreRT. Skip the ambient conditions checks and profitability checks. if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0) { if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers()) { // Raw PInvoke call in PInvoke IL stub generated must be inlined to avoid infinite // recursive calls to the stub. } else { if (!impCanPInvokeInline()) { return; } // Size-speed tradeoff: don't use inline pinvoke at rarely // executed call sites. The non-inline version is more // compact. if (block->isRunRarely()) { return; } } } // The expensive check should be last if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig)) { return; } } JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s\n", info.compFullName)); call->gtFlags |= GTF_CALL_UNMANAGED; call->unmgdCallConv = unmanagedCallConv; if (!call->IsSuppressGCTransition()) { info.compUnmanagedCallCountWithGCTransition++; } // AMD64 convention is same for native and managed if (unmanagedCallConv == CorInfoCallConvExtension::C || unmanagedCallConv == CorInfoCallConvExtension::CMemberFunction) { call->gtFlags |= GTF_CALL_POP_ARGS; } if (unmanagedCallConv == CorInfoCallConvExtension::Thiscall) { call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL; } } GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di) { var_types callRetTyp = JITtype2varType(sig->retType); /* The function pointer is on top of the stack - It may be a * complex expression. As it is evaluated after the args, * it may cause registered args to be spilled. Simply spill it. */ // Ignore this trivial case. 
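// Sketch of the non-trivial shape (hypothetical IL for a calli):
//
//     ldarg.0
//     ldarg.1
//     ldsfld     native int C::s_fnPtr     // function pointer is not a simple local
//     calli      int32(int32, int32)
//
// The ldsfld is spilled to a temp below so that evaluating it cannot disturb the
// arguments that were pushed before it.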
if (impStackTop().val->gtOper != GT_LCL_VAR) { impSpillStackEntry(verCurrentState.esStackDepth - 1, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall")); } /* Get the function pointer */ GenTree* fptr = impPopStack().val; // The function pointer is typically a sized to match the target pointer size // However, stubgen IL optimization can change LDC.I8 to LDC.I4 // See ILCodeStream::LowerOpcode assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT); #ifdef DEBUG // This temporary must never be converted to a double in stress mode, // because that can introduce a call to the cast helper after the // arguments have already been evaluated. if (fptr->OperGet() == GT_LCL_VAR) { lvaTable[fptr->AsLclVarCommon()->GetLclNum()].lvKeepType = 1; } #endif /* Create the call node */ GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); #ifdef UNIX_X86_ABI call->gtFlags &= ~GTF_CALL_POP_ARGS; #endif return call; } /*****************************************************************************/ void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig) { assert(call->gtFlags & GTF_CALL_UNMANAGED); /* Since we push the arguments in reverse order (i.e. right -> left) * spill any side effects from the stack * * OBS: If there is only one side effect we do not need to spill it * thus we have to spill all side-effects except last one */ unsigned lastLevelWithSideEffects = UINT_MAX; unsigned argsToReverse = sig->numArgs; // For "thiscall", the first argument goes in a register. Since its // order does not need to be changed, we do not need to spill it if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { assert(argsToReverse); argsToReverse--; } #ifndef TARGET_X86 // Don't reverse args on ARM or x64 - first four args always placed in regs in order argsToReverse = 0; #endif for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++) { if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF) { assert(lastLevelWithSideEffects == UINT_MAX); impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect")); } else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) { if (lastLevelWithSideEffects != UINT_MAX) { /* We had a previous side effect - must spill it */ impSpillStackEntry(lastLevelWithSideEffects, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect")); /* Record the level for the current side effect in case we will spill it */ lastLevelWithSideEffects = level; } else { /* This is the first side effect encountered - record its level */ lastLevelWithSideEffects = level; } } } /* The argument list is now "clean" - no out-of-order side effects * Pop the argument list in reverse order */ GenTreeCall::Use* args = impPopReverseCallArgs(sig->numArgs, sig, sig->numArgs - argsToReverse); call->AsCall()->gtCallArgs = args; if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { GenTree* thisPtr = args->GetNode(); impBashVarAddrsToI(thisPtr); assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF); } for (GenTreeCall::Use& argUse : GenTreeCall::UseList(args)) { GenTree* arg = argUse.GetNode(); call->gtFlags |= arg->gtFlags & GTF_GLOB_EFFECT; // We should not be passing gc typed args to an unmanaged call. 
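// e.g. a TYP_BYREF coming from a C# "ref int" argument is retyped to TYP_I_IMPL below so
// that the GC info and the pinvoke signature (native int) agree; an object reference
// (TYP_REF) reaching this point would be invalid IL and trips the assert.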
if (varTypeIsGC(arg->TypeGet())) { // Tolerate byrefs by retyping to native int. // // This is needed or we'll generate inconsistent GC info // for this arg at the call site (gc info says byref, // pinvoke sig says native int). // if (arg->TypeGet() == TYP_BYREF) { arg->ChangeType(TYP_I_IMPL); } else { assert(!"*** invalid IL: gc ref passed to unmanaged call"); } } } } //------------------------------------------------------------------------ // impInitClass: Build a node to initialize the class before accessing the // field if necessary // // Arguments: // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized // by a call to CEEInfo::resolveToken(). // // Return Value: If needed, a pointer to the node that will perform the class // initializtion. Otherwise, nullptr. // GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle); if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0) { return nullptr; } bool runtimeLookup; GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup); if (node == nullptr) { assert(compDonotInline()); return nullptr; } if (runtimeLookup) { node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(node)); } else { // Call the shared non gc static helper, as its the fastest node = fgGetSharedCCtor(pResolvedToken->hClass); } return node; } GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp) { GenTree* op1 = nullptr; #if defined(DEBUG) // If we're replaying under SuperPMI, we're going to read the data stored by SuperPMI and use it // for optimization. Unfortunately, SuperPMI doesn't implement a guarantee on the alignment of // this data, so for some platforms which don't allow unaligned access (e.g., Linux arm32), // this can fault. We should fix SuperPMI to guarantee alignment, but that is a big change. // Instead, simply fix up the data here for future use. // This variable should be the largest size element, with the largest alignment requirement, // and the native C++ compiler should guarantee sufficient alignment. double aligned_data = 0.0; void* p_aligned_data = &aligned_data; if (info.compMethodSuperPMIIndex != -1) { switch (lclTyp) { case TYP_BOOL: case TYP_BYTE: case TYP_UBYTE: static_assert_no_msg(sizeof(unsigned __int8) == sizeof(bool)); static_assert_no_msg(sizeof(unsigned __int8) == sizeof(signed char)); static_assert_no_msg(sizeof(unsigned __int8) == sizeof(unsigned char)); // No alignment necessary for byte. 
break; case TYP_SHORT: case TYP_USHORT: static_assert_no_msg(sizeof(unsigned __int16) == sizeof(short)); static_assert_no_msg(sizeof(unsigned __int16) == sizeof(unsigned short)); if ((size_t)fldAddr % sizeof(unsigned __int16) != 0) { *(unsigned __int16*)p_aligned_data = GET_UNALIGNED_16(fldAddr); fldAddr = p_aligned_data; } break; case TYP_INT: case TYP_UINT: case TYP_FLOAT: static_assert_no_msg(sizeof(unsigned __int32) == sizeof(int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(unsigned int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(float)); if ((size_t)fldAddr % sizeof(unsigned __int32) != 0) { *(unsigned __int32*)p_aligned_data = GET_UNALIGNED_32(fldAddr); fldAddr = p_aligned_data; } break; case TYP_LONG: case TYP_ULONG: case TYP_DOUBLE: static_assert_no_msg(sizeof(unsigned __int64) == sizeof(__int64)); static_assert_no_msg(sizeof(unsigned __int64) == sizeof(double)); if ((size_t)fldAddr % sizeof(unsigned __int64) != 0) { *(unsigned __int64*)p_aligned_data = GET_UNALIGNED_64(fldAddr); fldAddr = p_aligned_data; } break; default: assert(!"Unexpected lclTyp"); break; } } #endif // DEBUG switch (lclTyp) { int ival; __int64 lval; double dval; case TYP_BOOL: ival = *((bool*)fldAddr); goto IVAL_COMMON; case TYP_BYTE: ival = *((signed char*)fldAddr); goto IVAL_COMMON; case TYP_UBYTE: ival = *((unsigned char*)fldAddr); goto IVAL_COMMON; case TYP_SHORT: ival = *((short*)fldAddr); goto IVAL_COMMON; case TYP_USHORT: ival = *((unsigned short*)fldAddr); goto IVAL_COMMON; case TYP_UINT: case TYP_INT: ival = *((int*)fldAddr); IVAL_COMMON: op1 = gtNewIconNode(ival); break; case TYP_LONG: case TYP_ULONG: lval = *((__int64*)fldAddr); op1 = gtNewLconNode(lval); break; case TYP_FLOAT: dval = *((float*)fldAddr); op1 = gtNewDconNode(dval); op1->gtType = TYP_FLOAT; break; case TYP_DOUBLE: dval = *((double*)fldAddr); op1 = gtNewDconNode(dval); break; default: assert(!"Unexpected lclTyp"); break; } return op1; } GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp) { // Ordinary static fields never overlap. RVA statics, however, can overlap (if they're // mapped to the same ".data" declaration). That said, such mappings only appear to be // possible with ILASM, and in ILASM-produced (ILONLY) images, RVA statics are always // read-only (using "stsfld" on them is UB). In mixed-mode assemblies, RVA statics can // be mutable, but the only current producer of such images, the C++/CLI compiler, does // not appear to support mapping different fields to the same address. So we will say // that "mutable overlapping RVA statics" are UB as well. If this ever changes, code in // morph and value numbering will need to be updated to respect "gtFldMayOverlap" and // "NotAField FldSeq". // For statics that are not "boxed", the initial address tree will contain the field sequence. // For those that are, we will attach it later, when adding the indirection for the box, since // that tree will represent the true address. bool isBoxedStatic = (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) != 0; FieldSeqNode* innerFldSeq = !isBoxedStatic ? 
GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField) : FieldSeqStore::NotAField(); GenTree* op1; switch (pFieldInfo->fieldAccessor) { case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: { assert(!compIsForInlining()); // We first call a special helper to get the statics base pointer op1 = impParentClassTokenToHandle(pResolvedToken); // compIsForInlining() is false so we should not get NULL here assert(op1 != nullptr); var_types type = TYP_BYREF; switch (pFieldInfo->helper) { case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE: type = TYP_I_IMPL; break; case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE: break; default: assert(!"unknown generic statics helper"); break; } op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewCallArgs(op1)); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); } break; case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); } else #endif { op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper); } op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); break; } case CORINFO_FIELD_STATIC_READYTORUN_HELPER: { #ifdef FEATURE_READYTORUN assert(opts.IsReadyToRun()); assert(!compIsForInlining()); CORINFO_LOOKUP_KIND kind; info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind); assert(kind.needsRuntimeLookup); GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind); GenTreeCall::Use* args = gtNewCallArgs(ctxTree); GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } var_types type = TYP_BYREF; op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); #else unreached(); #endif // FEATURE_READYTORUN } break; default: { // Do we need the address of a static field? // if (access & CORINFO_ACCESS_ADDRESS) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr); // We should always be able to access this static's address directly. assert(pFldAddr == nullptr); // Create the address node. GenTreeFlags handleKind = isBoxedStatic ? GTF_ICON_STATIC_BOX_PTR : GTF_ICON_STATIC_HDL; op1 = gtNewIconHandleNode((size_t)fldAddr, handleKind, innerFldSeq); #ifdef DEBUG op1->AsIntCon()->gtTargetHandle = op1->AsIntCon()->gtIconVal; #endif if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_ICON_INITCLASS; } } else // We need the value of a static field { // In future, it may be better to just create the right tree here instead of folding it later. 
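                // Build a GT_FIELD for the static field. For "boxed" statics the real payload lives
                // inside the boxed object, so an extra indirection plus a pointer-size offset is added below.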
op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField); if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_FLD_INITCLASS; } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1->ChangeType(TYP_REF); // points at boxed object op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING; } } return op1; } break; } } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); } if (!(access & CORINFO_ACCESS_ADDRESS)) { if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF; } } return op1; } // In general try to call this before most of the verification work. Most people expect the access // exceptions before the verification exceptions. If you do this after, that usually doesn't happen. Turns // out if you can't access something we also think that you're unverifiable for other reasons. void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { if (result != CORINFO_ACCESS_ALLOWED) { impHandleAccessAllowedInternal(result, helperCall); } } void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { switch (result) { case CORINFO_ACCESS_ALLOWED: break; case CORINFO_ACCESS_ILLEGAL: // if we're verifying, then we need to reject the illegal access to ensure that we don't think the // method is verifiable. Otherwise, delay the exception to runtime. 
if (compIsForImportOnly())
{
    info.compCompHnd->ThrowExceptionForHelper(helperCall);
}
else
{
    impInsertHelperCall(helperCall);
}
break;
}
}

void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
{
    // Construct the argument list
    GenTreeCall::Use* args = nullptr;
    assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
    for (unsigned i = helperInfo->numArgs; i > 0; --i)
    {
        const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
        GenTree*                  currentArg = nullptr;
        switch (helperArg.argType)
        {
            case CORINFO_HELPER_ARG_TYPE_Field:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
                    info.compCompHnd->getFieldClass(helperArg.fieldHandle));
                currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Method:
                info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
                currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Class:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
                currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Module:
                currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Const:
                currentArg = gtNewIconNode(helperArg.constant);
                break;
            default:
                NO_WAY("Illegal helper arg type");
        }
        args = gtPrependNewCallArg(currentArg, args);
    }

    /* TODO-Review:
     * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
     * Also, consider sticking this in the first basic block.
     */
    GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
    impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}

//------------------------------------------------------------------------
// impTailCallRetTypeCompatible: Checks whether the return types of the caller
//    and callee are compatible so that the callee can be tail called.
//
// Arguments:
//     allowWidening -- whether to allow implicit widening by the callee.
//                      For instance, allowing int32 -> int16 tailcalls.
//                      The managed calling convention allows this, but
//                      we don't want explicit tailcalls to depend on this
//                      detail of the managed calling convention.
//     callerRetType -- the caller's return type
//     callerRetTypeClass - the caller's return struct type
//     callerCallConv -- calling convention of the caller
//     calleeRetType -- the callee's return type
//     calleeRetTypeClass - the callee return struct type
//     calleeCallConv -- calling convention of the callee
//
// Returns:
//     True if the tailcall types are compatible.
//
// Remarks:
//     Note that here we don't check compatibility in the IL Verifier sense, but
//     rather whether the return values end up in the same return register.
bool Compiler::impTailCallRetTypeCompatible(bool                     allowWidening,
                                            var_types                callerRetType,
                                            CORINFO_CLASS_HANDLE     callerRetTypeClass,
                                            CorInfoCallConvExtension callerCallConv,
                                            var_types                calleeRetType,
                                            CORINFO_CLASS_HANDLE     calleeRetTypeClass,
                                            CorInfoCallConvExtension calleeCallConv)
{
    // Early out if the types are the same.
    if (callerRetType == calleeRetType)
    {
        return true;
    }

    // For integral types the managed calling convention dictates that callee
    // will widen the return value to 4 bytes, so we can allow implicit widening
    // in managed to managed tailcalls when dealing with <= 4 bytes.
bool isManaged = (callerCallConv == CorInfoCallConvExtension::Managed) &&
                 (calleeCallConv == CorInfoCallConvExtension::Managed);

if (allowWidening && isManaged && varTypeIsIntegral(callerRetType) && varTypeIsIntegral(calleeRetType) &&
    (genTypeSize(callerRetType) <= 4) && (genTypeSize(calleeRetType) <= genTypeSize(callerRetType)))
{
    return true;
}

// If the class handles are the same and not null, the return types are compatible.
if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
{
    return true;
}

#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
// Jit64 compat:
if (callerRetType == TYP_VOID)
{
    // This needs to be allowed to support the following IL pattern that Jit64 allows:
    //     tail.call
    //     pop
    //     ret
    //
    // Note that the above IL pattern is not valid as per IL verification rules.
    // Therefore, only full trust code can take advantage of this pattern.
    return true;
}

// These checks return true if the return value type sizes are the same and
// get returned in the same return register i.e. caller doesn't need to normalize
// return value. Some of the tail calls permitted by below checks would have
// been rejected by IL Verifier before we reached here. Therefore, only full
// trust code can make those tail calls.
unsigned callerRetTypeSize = 0;
unsigned calleeRetTypeSize = 0;

bool isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize,
                                                           true, info.compIsVarArgs, callerCallConv);
bool isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize,
                                                           true, info.compIsVarArgs, calleeCallConv);

if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
{
    return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
}
#endif // TARGET_AMD64 || TARGET_ARM64

return false;
}

/********************************************************************************
 *
 * Returns true if the current opcode and the opcodes following it correspond
 * to a supported tail call IL pattern.
 *
 */
bool Compiler::impIsTailCallILPattern(
    bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive)
{
    // Bail out if the current opcode is not a call.
    if (!impOpcodeIsCallOpcode(curOpcode))
    {
        return false;
    }

#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
    // If shared ret tail opt is not enabled, we will enable
    // it for recursive methods.
    if (isRecursive)
#endif
    {
        // We can actually handle the case where the ret is in a fallthrough block, as long as that is the only
        // part of the sequence. Make sure we don't go past the end of the IL however.
codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize); } // Bail out if there is no next opcode after call if (codeAddrOfNextOpcode >= codeEnd) { return false; } OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode); return (nextOpcode == CEE_RET); } /***************************************************************************** * * Determine whether the call could be converted to an implicit tail call * */ bool Compiler::impIsImplicitTailCallCandidate( OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive) { #if FEATURE_TAILCALL_OPT if (!opts.compTailCallOpt) { return false; } if (opts.OptimizationDisabled()) { return false; } // must not be tail prefixed if (prefixFlags & PREFIX_TAILCALL_EXPLICIT) { return false; } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // the block containing call is marked as BBJ_RETURN // We allow shared ret tail call optimization on recursive calls even under // !FEATURE_TAILCALL_OPT_SHARED_RETURN. if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN)) return false; #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN // must be call+ret or call+pop+ret if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive)) { return false; } return true; #else return false; #endif // FEATURE_TAILCALL_OPT } //------------------------------------------------------------------------ // impImportCall: import a call-inspiring opcode // // Arguments: // opcode - opcode that inspires the call // pResolvedToken - resolved token for the call target // pConstrainedResolvedToken - resolved constraint token (or nullptr) // newObjThis - tree for this pointer or uninitalized newobj temp (or nullptr) // prefixFlags - IL prefix flags for the call // callInfo - EE supplied info for the call // rawILOffset - IL offset of the opcode, used for guarded devirtualization. // // Returns: // Type of the call's return value. // If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF. // However we can't assert for this here yet because there are cases we miss. See issue #13272. // // // Notes: // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ. // // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated // uninitalized object. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif var_types Compiler::impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset) { assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI); // The current statement DI may not refer to the exact call, but for calls // we wish to be able to attach the exact IL instruction to get "return // value" support in the debugger, so create one with the exact IL offset. 
DebugInfo di = impCreateDIWithCurrentStackInfo(rawILOffset, true); var_types callRetTyp = TYP_COUNT; CORINFO_SIG_INFO* sig = nullptr; CORINFO_METHOD_HANDLE methHnd = nullptr; CORINFO_CLASS_HANDLE clsHnd = nullptr; unsigned clsFlags = 0; unsigned mflags = 0; GenTree* call = nullptr; GenTreeCall::Use* args = nullptr; CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM; CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr; bool exactContextNeedsRuntimeLookup = false; bool canTailCall = true; const char* szCanTailCallFailReason = nullptr; const int tailCallFlags = (prefixFlags & PREFIX_TAILCALL); const bool isReadonlyCall = (prefixFlags & PREFIX_READONLY) != 0; CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr; // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could // do that before tailcalls, but that is probably not the intended // semantic. So just disallow tailcalls from synchronized methods. // Also, popping arguments in a varargs function is more work and NYI // If we have a security object, we have to keep our frame around for callers // to see any imperative security. // Reverse P/Invokes need a call to CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT // at the end, so tailcalls should be disabled. if (info.compFlags & CORINFO_FLG_SYNCH) { canTailCall = false; szCanTailCallFailReason = "Caller is synchronized"; } else if (opts.IsReversePInvoke()) { canTailCall = false; szCanTailCallFailReason = "Caller is Reverse P/Invoke"; } #if !FEATURE_FIXED_OUT_ARGS else if (info.compIsVarArgs) { canTailCall = false; szCanTailCallFailReason = "Caller is varargs"; } #endif // FEATURE_FIXED_OUT_ARGS // We only need to cast the return value of pinvoke inlined calls that return small types // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there. // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for // the time being that the callee might be compiled by the other JIT and thus the return // value will need to be widened by us (or not widened at all...) // ReadyToRun code sticks with default calling convention that does not widen small return types. bool checkForSmallType = opts.IsReadyToRun(); bool bIntrinsicImported = false; CORINFO_SIG_INFO calliSig; GenTreeCall::Use* extraArg = nullptr; /*------------------------------------------------------------------------- * First create the call node */ if (opcode == CEE_CALLI) { if (IsTargetAbi(CORINFO_CORERT_ABI)) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block))) { eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo); return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset); } } /* Get the call site sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig); callRetTyp = JITtype2varType(calliSig.retType); call = impImportIndirectCall(&calliSig, di); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? 
info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif sig = &calliSig; } else // (opcode != CEE_CALLI) { NamedIntrinsic ni = NI_Illegal; // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to // supply the instantiation parameters necessary to make direct calls to underlying // shared generic code, rather than calling through instantiating stubs. If the // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT // must indeed pass an instantiation parameter. methHnd = callInfo->hMethod; sig = &(callInfo->sig); callRetTyp = JITtype2varType(sig->retType); mflags = callInfo->methodFlags; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif if (compIsForInlining()) { /* Does the inlinee use StackCrawlMark */ if (mflags & CORINFO_FLG_DONT_INLINE_CALLER) { compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK); return TYP_UNDEF; } /* For now ignore varargs */ if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS); return TYP_UNDEF; } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return TYP_UNDEF; } if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT)) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL); return TYP_UNDEF; } } clsHnd = pResolvedToken->hClass; clsFlags = callInfo->classFlags; #ifdef DEBUG // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute. // This recognition should really be done by knowing the methHnd of the relevant Mark method(s). // These should be in corelib.h, and available through a JIT/EE interface call. 
const char* modName; const char* className; const char* methodName; if ((className = eeGetClassName(clsHnd)) != nullptr && strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 && (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0) { return impImportJitTestLabelMark(sig->numArgs); } #endif // DEBUG // <NICE> Factor this into getCallInfo </NICE> bool isSpecialIntrinsic = false; if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_INTRINSIC)) != 0) { const bool isTailCall = canTailCall && (tailCallFlags != 0); call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, isReadonlyCall, isTailCall, pConstrainedResolvedToken, callInfo->thisTransform, &ni, &isSpecialIntrinsic); if (compDonotInline()) { return TYP_UNDEF; } if (call != nullptr) { #ifdef FEATURE_READYTORUN if (call->OperGet() == GT_INTRINSIC) { if (opts.IsReadyToRun()) { noway_assert(callInfo->kind == CORINFO_CALL); call->AsIntrinsic()->gtEntryPoint = callInfo->codePointerLookup.constLookup; } else { call->AsIntrinsic()->gtEntryPoint.addr = nullptr; call->AsIntrinsic()->gtEntryPoint.accessType = IAT_VALUE; } } #endif bIntrinsicImported = true; goto DONE_CALL; } } #ifdef FEATURE_SIMD if (featureSIMD) { call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token); if (call != nullptr) { bIntrinsicImported = true; goto DONE_CALL; } } #endif // FEATURE_SIMD if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG) { BADCODE("Bad calling convention"); } //------------------------------------------------------------------------- // Construct the call node // // Work out what sort of call we're making. // Dispense with virtual calls implemented via LDVIRTFTN immediately. constraintCallThisTransform = callInfo->thisTransform; exactContextHnd = callInfo->contextHandle; exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup; switch (callInfo->kind) { case CORINFO_VIRTUALCALL_STUB: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); if (callInfo->stubLookup.lookupKind.needsRuntimeLookup) { if (callInfo->stubLookup.lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE); return TYP_UNDEF; } GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd); assert(!compDonotInline()); // This is the rough code to set up an indirect stub call assert(stubAddr != nullptr); // The stubAddr may be a // complex expression. As it is evaluated after the args, // it may cause registered args to be spilled. Simply spill it. 
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup")); impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE); stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr); call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT); call->gtFlags |= GTF_CALL_VIRT_STUB; #ifdef TARGET_X86 // No tailcalls allowed for these yet... canTailCall = false; szCanTailCallFailReason = "VirtualCall with runtime lookup"; #endif } else { // The stub address is known at compile time call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->AsCall()->gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr; call->gtFlags |= GTF_CALL_VIRT_STUB; assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE && callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE); if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT; } } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // Null check is sometimes needed for ready to run to handle // non-virtual <-> virtual changes between versions if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } } #endif break; } case CORINFO_VIRTUALCALL_VTABLE: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->gtFlags |= GTF_CALL_VIRT_VTABLE; // Should we expand virtual call targets early for this method? // if (opts.compExpandCallsEarly) { // Mark this method to expand the virtual call target early in fgMorpgCall call->AsCall()->SetExpandedEarly(); } break; } case CORINFO_VIRTUALCALL_LDVIRTFTN: { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN); return TYP_UNDEF; } assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); // OK, We've been told to call via LDVIRTFTN, so just // take the call now.... 
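                // Pop the call arguments and the 'this' pointer up front; 'this' is cloned so one copy
                // feeds the ldvirtftn lookup and the other becomes the 'this' argument of the indirect call.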
GenTreeCall::Use* args = impPopCallArgs(sig->numArgs, sig);

GenTree* thisPtr = impPopStack().val;
thisPtr          = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
assert(thisPtr != nullptr);

// Clone the (possibly transformed) "this" pointer
GenTree* thisPtrCopy;
thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                       nullptr DEBUGARG("LDVIRTFTN this pointer"));

GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
assert(fptr != nullptr);

thisPtr = nullptr; // can't reuse it

// Now make an indirect call through the function pointer

unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);

// Create the actual call node

call                          = gtNewIndCallNode(fptr, callRetTyp, args, di);
call->AsCall()->gtCallThisArg = gtNewCallArgs(thisPtrCopy);
call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);

if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
{
    // CoreRT generic virtual method: need to handle potential fat function pointers
    addFatPointerCandidate(call->AsCall());
}
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
    // Null check is needed for ready to run to handle
    // non-virtual <-> virtual changes between versions
    call->gtFlags |= GTF_CALL_NULLCHECK;
}
#endif

// Since we are jumping over some code, check that it's OK to skip that code
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
       (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
goto DONE;
}

case CORINFO_CALL:
{
    // This is for a non-virtual, non-interface etc. call
    call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di);

    // We remove the nullcheck for the GetType call intrinsic.
    // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
    // and intrinsics.
    if (callInfo->nullInstanceCheck &&
        !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (ni == NI_System_Object_GetType)))
    {
        call->gtFlags |= GTF_CALL_NULLCHECK;
    }

#ifdef FEATURE_READYTORUN
    if (opts.IsReadyToRun())
    {
        call->AsCall()->setEntryPoint(callInfo->codePointerLookup.constLookup);
    }
#endif
    break;
}

case CORINFO_CALL_CODE_POINTER:
{
    // The EE has asked us to call by computing a code pointer and then doing an
    // indirect call. This is because a runtime lookup is required to get the code entry point.

    // These calls always follow a uniform calling convention, i.e.
no extra hidden params assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); GenTree* fptr = impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod); if (compDonotInline()) { return TYP_UNDEF; } // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } break; } default: assert(!"unknown call kind"); break; } //------------------------------------------------------------------------- // Set more flags PREFIX_ASSUME(call != nullptr); if (mflags & CORINFO_FLG_NOGCCHECK) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK; } // Mark call if it's one of the ones we will maybe treat as an intrinsic if (isSpecialIntrinsic) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC; } } assert(sig); assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set. /* Some sanity checks */ // CALL_VIRT and NEWOBJ must have a THIS pointer assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS)); // static bit and hasThis are negations of one another assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0)); assert(call != nullptr); /*------------------------------------------------------------------------- * Check special-cases etc */ /* Special case - Check if it is a call to Delegate.Invoke(). */ if (mflags & CORINFO_FLG_DELEGATE_INVOKE) { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(mflags & CORINFO_FLG_FINAL); /* Set the delegate flag */ call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV; if (callInfo->wrapperDelegateInvoke) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_WRAPPER_DELEGATE_INV; } if (opcode == CEE_CALLVIRT) { assert(mflags & CORINFO_FLG_FINAL); /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */ assert(call->gtFlags & GTF_CALL_NULLCHECK); call->gtFlags &= ~GTF_CALL_NULLCHECK; } } CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass; actualMethodRetTypeSigClass = sig->retTypeSigClass; /* Check for varargs */ if (!compFeatureVarArg() && ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)) { BADCODE("Varargs not supported."); } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { assert(!compIsForInlining()); /* Set the right flags */ call->gtFlags |= GTF_CALL_POP_ARGS; call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VARARGS; /* Can't allow tailcall for varargs as it is caller-pop. The caller will be expecting to pop a certain number of arguments, but if we tailcall to a function with a different number of arguments, we are hosed. There are ways around this (caller remembers esp value, varargs is not caller-pop, etc), but not worth it. 
*/ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is varargs"; } #endif /* Get the total number of arguments - this is already correct * for CALLI - for methods we have to get it from the call site */ if (opcode != CEE_CALLI) { #ifdef DEBUG unsigned numArgsDef = sig->numArgs; #endif eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); // For vararg calls we must be sure to load the return type of the // method actually being called, as well as the return types of the // specified in the vararg signature. With type equivalency, these types // may not be the same. if (sig->retTypeSigClass != actualMethodRetTypeSigClass) { if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggerred from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass); } } assert(numArgsDef <= sig->numArgs); } /* We will have "cookie" as the last argument but we cannot push * it on the operand stack because we may overflow, so we append it * to the arg list next after we pop them */ } //--------------------------- Inline NDirect ------------------------------ // For inline cases we technically should look at both the current // block and the call site block (or just the latter if we've // fused the EH trees). However the block-related checks pertain to // EH and we currently won't inline a method with EH. So for // inlinees, just checking the call site block is sufficient. { // New lexical block here to avoid compilation errors because of GOTOs. BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block); } #ifdef UNIX_X86_ABI // On Unix x86 we use caller-cleaned convention. if ((call->gtFlags & GTF_CALL_UNMANAGED) == 0) call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI if (call->gtFlags & GTF_CALL_UNMANAGED) { // We set up the unmanaged call by linking the frame, disabling GC, etc // This needs to be cleaned up on return. // In addition, native calls have different normalization rules than managed code // (managed calling convention always widens return values in the callee) if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is native"; } checkForSmallType = true; impPopArgsForUnmanagedCall(call, sig); goto DONE; } else if ((opcode == CEE_CALLI) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG)) { if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig)) { // Normally this only happens with inlining. // However, a generic method (or type) being NGENd into another module // can run into this issue as well. There's not an easy fall-back for NGEN // so instead we fallback to JIT. 
if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE); } else { IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)"); } return TYP_UNDEF; } GenTree* cookie = eeGetPInvokeCookie(sig); // This cookie is required to be either a simple GT_CNS_INT or // an indirection of a GT_CNS_INT // GenTree* cookieConst = cookie; if (cookie->gtOper == GT_IND) { cookieConst = cookie->AsOp()->gtOp1; } assert(cookieConst->gtOper == GT_CNS_INT); // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that // we won't allow this tree to participate in any CSE logic // cookie->gtFlags |= GTF_DONT_CSE; cookieConst->gtFlags |= GTF_DONT_CSE; call->AsCall()->gtCallCookie = cookie; if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "PInvoke calli"; } } /*------------------------------------------------------------------------- * Create the argument list */ //------------------------------------------------------------------------- // Special case - for varargs we have an implicit last argument if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { assert(!compIsForInlining()); void *varCookie, *pVarCookie; if (!info.compCompHnd->canGetVarArgsHandle(sig)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE); return TYP_UNDEF; } varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie); assert((!varCookie) != (!pVarCookie)); GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig); assert(extraArg == nullptr); extraArg = gtNewCallArgs(cookie); } //------------------------------------------------------------------------- // Extra arg for shared generic code and array methods // // Extra argument containing instantiation information is passed in the // following circumstances: // (a) To the "Address" method on array classes; the extra parameter is // the array's type handle (a TypeDesc) // (b) To shared-code instance methods in generic structs; the extra parameter // is the struct's type handle (a vtable ptr) // (c) To shared-code per-instantiation non-generic static methods in generic // classes and structs; the extra parameter is the type handle // (d) To shared-code generic methods; the extra parameter is an // exact-instantiation MethodDesc // // We also set the exact type context associated with the call so we can // inline the call correctly later on. 
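    // The instantiation argument computed below ('instParam') is appended to the call's
    // argument list through 'extraArg'.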
if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE) { assert(call->AsCall()->gtCallType == CT_USER_FUNC); if (clsHnd == nullptr) { NO_WAY("CALLI on parameterized type"); } assert(opcode != CEE_CALLI); GenTree* instParam; bool runtimeLookup; // Instantiated generic method if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD) { assert(exactContextHnd != METHOD_BEING_COMPILED_CONTEXT()); CORINFO_METHOD_HANDLE exactMethodHandle = (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK); if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbMethHndNode(exactMethodHandle); info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle); } } else { instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } // otherwise must be an instance method in a generic struct, // a static method in a generic type, or a runtime-generated array method else { assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); CORINFO_CLASS_HANDLE exactClassHandle = eeGetClassFromContext(exactContextHnd); if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD); return TYP_UNDEF; } if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall) { // We indicate "readonly" to the Address operation by using a null // instParam. instParam = gtNewIconNode(0, TYP_REF); } else if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbClsHndNode(exactClassHandle); info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle); } } else { instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } assert(extraArg == nullptr); extraArg = gtNewCallArgs(instParam); } if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0)) { // Only verifiable cases are supported. // dup; ldvirtftn; newobj; or ldftn; newobj. // IL test could contain unverifiable sequence, in this case optimization should not be done. 
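        // If the preceding ldftn/ldvirtftn left a token on the stack, remember it so that
        // fgOptimizeDelegateConstructor can use it when optimizing the delegate construction below.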
if (impStackHeight() > 0) { typeInfo delegateTypeInfo = impStackTop().seTypeInfo; if (delegateTypeInfo.IsToken()) { ldftnToken = delegateTypeInfo.GetToken(); } } } //------------------------------------------------------------------------- // The main group of arguments args = impPopCallArgs(sig->numArgs, sig, extraArg); call->AsCall()->gtCallArgs = args; for (GenTreeCall::Use& use : call->AsCall()->Args()) { call->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT; } //------------------------------------------------------------------------- // The "this" pointer if (((mflags & CORINFO_FLG_STATIC) == 0) && ((sig->callConv & CORINFO_CALLCONV_EXPLICITTHIS) == 0) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr))) { GenTree* obj; if (opcode == CEE_NEWOBJ) { obj = newobjThis; } else { obj = impPopStack().val; obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform); if (compDonotInline()) { return TYP_UNDEF; } } // Store the "this" value in the call call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT; call->AsCall()->gtCallThisArg = gtNewCallArgs(obj); // Is this a virtual or interface call? if (call->AsCall()->IsVirtual()) { // only true object pointers can be virtual assert(obj->gtType == TYP_REF); // See if we can devirtualize. const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isLateDevirtualization = false; impDevirtualizeCall(call->AsCall(), pResolvedToken, &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle, &exactContextHnd, isLateDevirtualization, isExplicitTailCall, // Take care to pass raw IL offset here as the 'debug info' might be different for // inlinees. rawILOffset); // Devirtualization may change which method gets invoked. Update our local cache. // methHnd = callInfo->hMethod; } if (impIsThis(obj)) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS; } } //------------------------------------------------------------------------- // The "this" pointer for "newobj" if (opcode == CEE_NEWOBJ) { if (clsFlags & CORINFO_FLG_VAROBJSIZE) { assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately // This is a 'new' of a variable sized object, wher // the constructor is to return the object. In this case // the constructor claims to return VOID but we know it // actually returns the new object assert(callRetTyp == TYP_VOID); callRetTyp = TYP_REF; call->gtType = TYP_REF; impSpillSpecialSideEff(); impPushOnStack(call, typeInfo(TI_REF, clsHnd)); } else { if (clsFlags & CORINFO_FLG_DELEGATE) { // New inliner morph it in impImportCall. // This will allow us to inline the call to the delegate constructor. call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken); } if (!bIntrinsicImported) { #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // append the call node. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Now push the value of the 'new onto the stack // This is a 'new' of a non-variable sized object. // Append the new node (op1) to the statement list, // and then push the local holding the value of this // new instruction on the stack. 
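            // For value classes the temp created for 'newobjThis' is the new object itself, so it is
            // pushed with its real type; for reference types the temp holds the object reference and
            // is pushed as TYP_REF.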
if (clsFlags & CORINFO_FLG_VALUECLASS) { assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR); unsigned tmp = newobjThis->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack()); } else { if (newobjThis->gtOper == GT_COMMA) { // We must have inserted the callout. Get the real newobj. newobjThis = newobjThis->AsOp()->gtOp2; } assert(newobjThis->gtOper == GT_LCL_VAR); impPushOnStack(gtNewLclvNode(newobjThis->AsLclVarCommon()->GetLclNum(), TYP_REF), typeInfo(TI_REF, clsHnd)); } } return callRetTyp; } DONE: #ifdef DEBUG // In debug we want to be able to register callsites with the EE. assert(call->AsCall()->callSig == nullptr); call->AsCall()->callSig = new (this, CMK_Generic) CORINFO_SIG_INFO; *call->AsCall()->callSig = *sig; #endif // Final importer checks for calls flagged as tail calls. // if (tailCallFlags != 0) { const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isImplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_IMPLICIT) != 0; const bool isStressTailCall = (tailCallFlags & PREFIX_TAILCALL_STRESS) != 0; // Exactly one of these should be true. assert(isExplicitTailCall != isImplicitTailCall); // This check cannot be performed for implicit tail calls for the reason // that impIsImplicitTailCallCandidate() is not checking whether return // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT. // As a result it is possible that in the following case, we find that // the type stack is non-empty if Callee() is considered for implicit // tail calling. // int Caller(..) { .... void Callee(); ret val; ... } // // Note that we cannot check return type compatibility before ImpImportCall() // as we don't have required info or need to duplicate some of the logic of // ImpImportCall(). // // For implicit tail calls, we perform this check after return types are // known to be compatible. if (isExplicitTailCall && (verCurrentState.esStackDepth != 0)) { BADCODE("Stack should be empty after tailcall"); } // For opportunistic tailcalls we allow implicit widening, i.e. tailcalls from int32 -> int16, since the // managed calling convention dictates that the callee widens the value. For explicit tailcalls we don't // want to require this detail of the calling convention to bubble up to the tailcall helpers bool allowWidening = isImplicitTailCall; if (canTailCall && !impTailCallRetTypeCompatible(allowWidening, info.compRetType, info.compMethodInfo->args.retTypeClass, info.compCallConv, callRetTyp, sig->retTypeClass, call->AsCall()->GetUnmanagedCallConv())) { canTailCall = false; szCanTailCallFailReason = "Return types are not tail call compatible"; } // Stack empty check for implicit tail calls. if (canTailCall && isImplicitTailCall && (verCurrentState.esStackDepth != 0)) { #ifdef TARGET_AMD64 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException // in JIT64, not an InvalidProgramException. Verify(false, "Stack should be empty after tailcall"); #else // TARGET_64BIT BADCODE("Stack should be empty after tailcall"); #endif //! TARGET_64BIT } // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN); // Ask VM for permission to tailcall if (canTailCall) { // True virtual or indirect calls, shouldn't pass in a callee handle. 
CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->AsCall()->gtCallType != CT_USER_FUNC) || call->AsCall()->IsVirtual()) ? nullptr : methHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, isExplicitTailCall)) { if (isExplicitTailCall) { // In case of explicit tail calls, mark it so that it is not considered // for in-lining. call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_EXPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); if (isStressTailCall) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_STRESS_TAILCALL; JITDUMP("\nGTF_CALL_M_STRESS_TAILCALL set for call [%06u]\n", dspTreeID(call)); } } else { #if FEATURE_TAILCALL_OPT // Must be an implicit tail call. assert(isImplicitTailCall); // It is possible that a call node is both an inline candidate and marked // for opportunistic tail calling. In-lining happens before morhphing of // trees. If in-lining of an in-line candidate gets aborted for whatever // reason, it will survive to the morphing stage at which point it will be // transformed into a tail call after performing additional checks. call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_IMPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); #else //! FEATURE_TAILCALL_OPT NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls"); #endif // FEATURE_TAILCALL_OPT } // This might or might not turn into a tailcall. We do more // checks in morph. For explicit tailcalls we need more // information in morph in case it turns out to be a // helper-based tailcall. if (isExplicitTailCall) { assert(call->AsCall()->tailCallInfo == nullptr); call->AsCall()->tailCallInfo = new (this, CMK_CorTailCallInfo) TailCallSiteInfo; switch (opcode) { case CEE_CALLI: call->AsCall()->tailCallInfo->SetCalli(sig); break; case CEE_CALLVIRT: call->AsCall()->tailCallInfo->SetCallvirt(sig, pResolvedToken); break; default: call->AsCall()->tailCallInfo->SetCall(sig, pResolvedToken); break; } } } else { // canTailCall reported its reasons already canTailCall = false; JITDUMP("\ninfo.compCompHnd->canTailCall returned false for call [%06u]\n", dspTreeID(call)); } } else { // If this assert fires it means that canTailCall was set to false without setting a reason! assert(szCanTailCallFailReason != nullptr); JITDUMP("\nRejecting %splicit tail call for [%06u], reason: '%s'\n", isExplicitTailCall ? "ex" : "im", dspTreeID(call), szCanTailCallFailReason); info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, isExplicitTailCall, TAILCALL_FAIL, szCanTailCallFailReason); } } // Note: we assume that small return types are already normalized by the managed callee // or by the pinvoke stub for calls to unmanaged code. if (!bIntrinsicImported) { // // Things needed to be checked when bIntrinsicImported is false. // assert(call->gtOper == GT_CALL); assert(callInfo != nullptr); if (compIsForInlining() && opcode == CEE_CALLVIRT) { GenTree* callObj = call->AsCall()->gtCallThisArg->GetNode(); if ((call->AsCall()->IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, call->AsCall()->gtCallArgs, callObj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? 
impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // Extra checks for tail calls and tail recursion. // // A tail recursive call is a potential loop from the current block to the start of the root method. // If we see a tail recursive call, mark the blocks from the call site back to the entry as potentially // being in a loop. // // Note: if we're importing an inlinee we don't mark the right set of blocks, but by then it's too // late. Currently this doesn't lead to problems. See GitHub issue 33529. // // OSR also needs to handle tail calls specially: // * block profiling in OSR methods needs to ensure probes happen before tail calls, not after. // * the root method entry must be imported if there's a recursive tail call or a potentially // inlineable tail call. // if ((tailCallFlags != 0) && canTailCall) { if (gtIsRecursiveCall(methHnd)) { assert(verCurrentState.esStackDepth == 0); BasicBlock* loopHead = nullptr; if (!compIsForInlining() && opts.IsOSR()) { // For root method OSR we may branch back to the actual method entry, // which is not fgFirstBB, and which we will need to import. assert(fgEntryBB != nullptr); loopHead = fgEntryBB; } else { // For normal jitting we may branch back to the firstBB; this // should already be imported. loopHead = fgFirstBB; } JITDUMP("\nTail recursive call [%06u] in the method. Mark " FMT_BB " to " FMT_BB " as having a backward branch.\n", dspTreeID(call), loopHead->bbNum, compCurBB->bbNum); fgMarkBackwardJump(loopHead, compCurBB); } // We only do these OSR checks in the root method because: // * If we fail to import the root method entry when importing the root method, we can't go back // and import it during inlining. So instead of checking jsut for recursive tail calls we also // have to check for anything that might introduce a recursive tail call. // * We only instrument root method blocks in OSR methods, // if (opts.IsOSR() && !compIsForInlining()) { // If a root method tail call candidate block is not a BBJ_RETURN, it should have a unique // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile // instrumentation. // if (compCurBB->bbJumpKind != BBJ_RETURN) { BasicBlock* const successor = compCurBB->GetUniqueSucc(); assert(successor->bbJumpKind == BBJ_RETURN); successor->bbFlags |= BBF_TAILCALL_SUCCESSOR; optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR; } // If this call might eventually turn into a loop back to method entry, make sure we // import the method entry. // assert(call->IsCall()); GenTreeCall* const actualCall = call->AsCall(); const bool mustImportEntryBlock = gtIsRecursiveCall(methHnd) || actualCall->IsInlineCandidate() || actualCall->IsGuardedDevirtualizationCandidate(); // Only schedule importation if we're not currently importing. // if (mustImportEntryBlock && (compCurBB != fgEntryBB)) { JITDUMP("\nOSR: inlineable or recursive tail call [%06u] in the method, so scheduling " FMT_BB " for importation\n", dspTreeID(call), fgEntryBB->bbNum); impImportBlockPending(fgEntryBB); } } } if ((sig->flags & CORINFO_SIGFLAG_FAT_CALL) != 0) { assert(opcode == CEE_CALLI || callInfo->kind == CORINFO_CALL_CODE_POINTER); addFatPointerCandidate(call->AsCall()); } DONE_CALL: // Push or append the result of the call if (callRetTyp == TYP_VOID) { if (opcode == CEE_NEWOBJ) { // we actually did push something, so don't spill the thing we just pushed. 
assert(verCurrentState.esStackDepth > 0); impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtDI); } else { impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } } else { impSpillSpecialSideEff(); if (clsFlags & CORINFO_FLG_ARRAY) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); } typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass); tiRetVal.NormaliseForStack(); // The CEE_READONLY prefix modifies the verification semantics of an Address // operation on an array type. if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall && tiRetVal.IsByRef()) { tiRetVal.SetIsReadonlyByRef(); } if (call->IsCall()) { // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call) GenTreeCall* origCall = call->AsCall(); const bool isFatPointerCandidate = origCall->IsFatPointerCandidate(); const bool isInlineCandidate = origCall->IsInlineCandidate(); const bool isGuardedDevirtualizationCandidate = origCall->IsGuardedDevirtualizationCandidate(); if (varTypeIsStruct(callRetTyp)) { // Need to treat all "split tree" cases here, not just inline candidates call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass); } // TODO: consider handling fatcalli cases this way too...? if (isInlineCandidate || isGuardedDevirtualizationCandidate) { // We should not have made any adjustments in impFixupCallStructReturn // as we defer those until we know the fate of the call. assert(call == origCall); assert(opts.OptEnabled(CLFLG_INLINING)); assert(!isFatPointerCandidate); // We should not try to inline calli. // Make the call its own tree (spill the stack if needed). // Do not consume the debug info here. This is particularly // important if we give up on the inline, in which case the // call will typically end up in the statement that contains // the GT_RET_EXPR that we leave on the stack. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI, false); // TODO: Still using the widened type. GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags); // Link the retExpr to the call so if necessary we can manipulate it later. origCall->gtInlineCandidateInfo->retExpr = retExpr; // Propagate retExpr as the placeholder for the call. call = retExpr; } else { // If the call is virtual, and has a generics context, and is not going to have a class probe, // record the context for possible use during late devirt. // // If we ever want to devirt at Tier0, and/or see issues where OSR methods under PGO lose // important devirtualizations, we'll want to allow both a class probe and a captured context. // if (origCall->IsVirtual() && (origCall->gtCallType != CT_INDIRECT) && (exactContextHnd != nullptr) && (origCall->gtClassProfileCandidateInfo == nullptr)) { JITDUMP("\nSaving context %p for call [%06u]\n", exactContextHnd, dspTreeID(origCall)); origCall->gtCallMoreFlags |= GTF_CALL_M_LATE_DEVIRT; LateDevirtualizationInfo* const info = new (this, CMK_Inlining) LateDevirtualizationInfo; info->exactContextHnd = exactContextHnd; origCall->gtLateDevirtualizationInfo = info; } if (isFatPointerCandidate) { // fatPointer candidates should be in statements of the form call() or var = call(). // Such form allows to find statements with fat calls without walking through whole trees // and removes problems with cutting trees. 
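                    // Spill the fat calli result into a fresh temp so the call becomes the root of its
                    // own statement; the temp then stands in for the call on the evaluation stack.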
assert(!bIntrinsicImported); assert(IsTargetAbi(CORINFO_CORERT_ABI)); if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn. { unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli")); LclVarDsc* varDsc = lvaGetDesc(calliSlot); varDsc->lvVerTypeInfo = tiRetVal; impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE); // impAssignTempGen can change src arg list and return type for call that returns struct. var_types type = genActualType(lvaTable[calliSlot].TypeGet()); call = gtNewLclvNode(calliSlot, type); } } // For non-candidates we must also spill, since we // might have locals live on the eval stack that this // call can modify. // // Suppress this for certain well-known call targets // that we know won't modify locals, eg calls that are // recognized in gtCanOptimizeTypeEquality. Otherwise // we may break key fragile pattern matches later on. bool spillStack = true; if (call->IsCall()) { GenTreeCall* callNode = call->AsCall(); if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) || gtIsTypeHandleToRuntimeTypeHandleHelper(callNode))) { spillStack = false; } else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0) { spillStack = false; } } if (spillStack) { impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call")); } } } if (!bIntrinsicImported) { //------------------------------------------------------------------------- // /* If the call is of a small type and the callee is managed, the callee will normalize the result before returning. However, we need to normalize small type values returned by unmanaged functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here if we use the shorter inlined pinvoke stub. */ if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT)) { call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp); } } impPushOnStack(call, tiRetVal); } // VSD functions get a new call target each time we getCallInfo, so clear the cache. // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out. 
// if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB)) // callInfoCache.uncacheCallInfo(); return callRetTyp; } #ifdef _PREFAST_ #pragma warning(pop) #endif bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv) { CorInfoType corType = methInfo->args.retType; if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY)) { // We have some kind of STRUCT being returned structPassingKind howToReturnStruct = SPK_Unknown; var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, callConv, &howToReturnStruct); if (howToReturnStruct == SPK_ByReference) { return true; } } return false; } #ifdef DEBUG // var_types Compiler::impImportJitTestLabelMark(int numArgs) { TestLabelAndNum tlAndN; if (numArgs == 2) { tlAndN.m_num = 0; StackEntry se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); GenTree* val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue(); } else if (numArgs == 3) { StackEntry se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); GenTree* val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_num = val->AsIntConCommon()->IconValue(); se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue(); } else { assert(false); } StackEntry expSe = impPopStack(); GenTree* node = expSe.val; // There are a small number of special cases, where we actually put the annotation on a subnode. if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100) { // A loop hoist annotation with value >= 100 means that the expression should be a static field access, // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some // offset within the the static field block whose address is returned by the helper call. // The annotation is saying that this address calculation, but not the entire access, should be hoisted. assert(node->OperGet() == GT_IND); tlAndN.m_num -= 100; GetNodeTestData()->Set(node->AsOp()->gtOp1, tlAndN); GetNodeTestData()->Remove(node); } else { GetNodeTestData()->Set(node, tlAndN); } impPushOnStack(node, expSe.seTypeInfo); return node->TypeGet(); } #endif // DEBUG //----------------------------------------------------------------------------------- // impFixupCallStructReturn: For a call node that returns a struct do one of the following: // - set the flag to indicate struct return via retbuf arg; // - adjust the return type to a SIMD type if it is returned in 1 reg; // - spill call result into a temp if it is returned into 2 registers or more and not tail call or inline candidate. 
// // Arguments: // call - GT_CALL GenTree node // retClsHnd - Class handle of return type of the call // // Return Value: // Returns new GenTree node after fixing struct return of call node // GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd) { if (!varTypeIsStruct(call)) { return call; } call->gtRetClsHnd = retClsHnd; #if FEATURE_MULTIREG_RET call->InitializeStructReturnType(this, retClsHnd, call->GetUnmanagedCallConv()); const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc(); const unsigned retRegCount = retTypeDesc->GetReturnRegCount(); #else // !FEATURE_MULTIREG_RET const unsigned retRegCount = 1; #endif // !FEATURE_MULTIREG_RET structPassingKind howToReturnStruct; var_types returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); if (howToReturnStruct == SPK_ByReference) { assert(returnType == TYP_UNKNOWN); call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG; return call; } // Recognize SIMD types as we do for LCL_VARs, // note it could be not the ABI specific type, for example, on x64 we can set 'TYP_SIMD8` // for `System.Numerics.Vector2` here but lower will change it to long as ABI dictates. var_types simdReturnType = impNormStructType(call->gtRetClsHnd); if (simdReturnType != call->TypeGet()) { assert(varTypeIsSIMD(simdReturnType)); JITDUMP("changing the type of a call [%06u] from %s to %s\n", dspTreeID(call), varTypeName(call->TypeGet()), varTypeName(simdReturnType)); call->ChangeType(simdReturnType); } if (retRegCount == 1) { return call; } #if FEATURE_MULTIREG_RET assert(varTypeIsStruct(call)); // It could be a SIMD returned in several regs. assert(returnType == TYP_STRUCT); assert((howToReturnStruct == SPK_ByValueAsHfa) || (howToReturnStruct == SPK_ByValue)); #ifdef UNIX_AMD64_ABI // must be a struct returned in two registers assert(retRegCount == 2); #else // not UNIX_AMD64_ABI assert(retRegCount >= 2); #endif // not UNIX_AMD64_ABI if (!call->CanTailCall() && !call->IsInlineCandidate()) { // Force a call returning multi-reg struct to be always of the IR form // tmp = call // // No need to assign a multi-reg struct to a local var if: // - It is a tail call or // - The call is marked for in-lining later return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv())); } return call; #endif // FEATURE_MULTIREG_RET } /***************************************************************************** For struct return values, re-type the operand in the case where the ABI does not use a struct return buffer */ //------------------------------------------------------------------------ // impFixupStructReturnType: For struct return values it sets appropriate flags in MULTIREG returns case; // in non-multiref case it handles two special helpers: `CORINFO_HELP_GETFIELDSTRUCT`, `CORINFO_HELP_UNBOX_NULLABLE`. // // Arguments: // op - the return value; // retClsHnd - the struct handle; // unmgdCallConv - the calling convention of the function that returns this struct. // // Return Value: // the result tree that does the return. 
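// Notes:
//    For multi-reg struct returns the operand is forced into the shape GT_RETURN(lclvar) or
//    GT_RETURN(call), introducing a temp assignment when necessary; for calls to the special
//    helpers that pretend to have a return buffer, a pseudo return buffer temp is materialized
//    and returned instead.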
// GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv) { assert(varTypeIsStruct(info.compRetType)); assert(info.compRetBuffArg == BAD_VAR_NUM); JITDUMP("\nimpFixupStructReturnType: retyping\n"); DISPTREE(op); #if defined(TARGET_XARCH) #if FEATURE_MULTIREG_RET // No VarArgs for CoreCLR on x64 Unix UNIX_AMD64_ABI_ONLY(assert(!info.compIsVarArgs)); // Is method returning a multi-reg struct? if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { // In case of multi-reg struct return, we force IR to be one of the following: // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp). if (op->gtOper == GT_LCL_VAR) { // Note that this is a multi-reg return. unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { return op; } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #else assert(info.compRetNativeType != TYP_STRUCT); #endif // defined(UNIX_AMD64_ABI) || defined(TARGET_X86) #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM) if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); // Make sure this struct type stays as struct so that we can return it as an HFA lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64) // Is method returning a multi-reg struct? if (IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); if (!lvaIsImplicitByRefLocal(lclNum)) { // Make sure this struct type is not struct promoted lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #endif // FEATURE_MULTIREG_RET && TARGET_ARM64 if (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this)) { // Don't retype `struct` as a primitive type in `ret` instruction. return op; } // This must be one of those 'special' helpers that don't // really have a return buffer, but instead use it as a way // to keep the trees cleaner with fewer address-taken temps. 
// // Well now we have to materialize the the return buffer as // an address-taken temp. Then we can return the temp. // // NOTE: this code assumes that since the call directly // feeds the return, then the call must be returning the // same structure/class/type. // unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer")); // No need to spill anything as we're about to return. impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE); op = gtNewLclvNode(tmpNum, info.compRetType); JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n"); DISPTREE(op); return op; } /***************************************************************************** CEE_LEAVE may be jumping out of a protected block, viz, a catch or a finally-protected try. We find the finally blocks protecting the current offset (in order) by walking over the complete exception table and finding enclosing clauses. This assumes that the table is sorted. This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS. If we are leaving a catch handler, we need to attach the CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks. After this function, the BBJ_LEAVE block has been converted to a different type. */ #if !defined(FEATURE_EH_FUNCLETS) void Compiler::impImportLeave(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nBefore import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created) unsigned blkAddr = block->bbCodeOffs; BasicBlock* leaveTarget = block->bbJumpDest; unsigned jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary BasicBlock* step = DUMMY_INIT(NULL); unsigned encFinallies = 0; // Number of enclosing finallies. GenTree* endCatches = NULL; Statement* endLFinStmt = NULL; // The statement tree to indicate the end of locally-invoked finally. unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? * If so, we need to call CORINFO_HELP_ENDCATCH. 
*/ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) BADCODE("leave out of fault/finally block"); // Create the call to CORINFO_HELP_ENDCATCH GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID); // Make a list of all the currently pending endCatches if (endCatches) endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch); else endCatches = endCatch; #ifdef DEBUG if (verbose) { printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to " "CORINFO_HELP_ENDCATCH\n", block->bbNum, XTnum); } #endif } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* This is a finally-protected try we are jumping out of */ /* If there are any pending endCatches, and we have already jumped out of a finally-protected try, then the endCatches have to be put in a block in an outer try for async exceptions to work correctly. Else, just use append to the original block */ BasicBlock* callBlock; assert(!encFinallies == !endLFinStmt); // if we have finallies, we better have an endLFin tree, and vice-versa if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY " "block %s\n", callBlock->dspToString()); } #endif } else { assert(step != DUMMY_INIT(NULL)); /* Calling the finally block */ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); assert(step->bbJumpKind == BBJ_ALWAYS); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n", callBlock->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } // note that this sets BBF_IMPORTED on the block impEndTreeList(callBlock, endLFinStmt, lastStmt); } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n", step->dspToString()); } #endif unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel; assert(finallyNesting <= compHndBBtabCount); callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. 
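            // Remember an end-of-finally marker for this handler: GT_END_LFIN carries the
            // handler's nesting level, and the statement created for it below is later placed
            // at the head of the next call-to-finally block or of the final step block.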
GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting); endLFinStmt = gtNewStmt(endLFin); endCatches = NULL; encFinallies++; invalidatePreds = true; } } /* Append any remaining endCatches, if any */ assert(!encFinallies == !endLFinStmt); if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS " "block %s\n", block->dspToString()); } #endif } else { // If leaveTarget is the start of another try block, we want to make sure that // we do not insert finalStep into that try block. Hence, we find the enclosing // try block. unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget); // Insert a new BB either in the try region indicated by tryIndex or // the handler region indicated by leaveTarget->bbHndIndex, // depending on which is the inner region. BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step); finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS; step->bbJumpDest = finalStep; /* The new block will inherit this block's weight */ finalStep->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies, finalStep->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } impEndTreeList(finalStep, endLFinStmt, lastStmt); finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE // Queue up the jump target for importing impImportBlockPending(leaveTarget); invalidatePreds = true; } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #else // FEATURE_EH_FUNCLETS void Compiler::impImportLeave(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nBefore import CEE_LEAVE in " FMT_BB " (targetting " FMT_BB "):\n", block->bbNum, block->bbJumpDest->bbNum); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created) unsigned blkAddr = block->bbCodeOffs; BasicBlock* leaveTarget = block->bbJumpDest; unsigned jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary BasicBlock* step = nullptr; enum StepType { // No step type; step == NULL. ST_None, // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair? // That is, is step->bbJumpDest where a finally will return to? ST_FinallyReturn, // The step block is a catch return. ST_Catch, // The step block is in a "try", created as the target for a finally return or the target for a catch return. 
ST_Try }; StepType stepType = ST_None; unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? */ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) { BADCODE("leave out of fault/finally block"); } /* We are jumping out of a catch */ if (step == nullptr) { step = block; step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET stepType = ST_Catch; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB " to BBJ_EHCATCHRET " "block\n", XTnum, step->bbNum); } #endif } else { BasicBlock* exitBlock; /* Create a new catch exit block in the catch region for the existing step block to jump to in this * scope */ exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step); assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch // exit) returns to this block step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ exitBlock->inheritWeight(block); exitBlock->bbFlags |= BBF_IMPORTED; /* This exit block is the new step */ step = exitBlock; stepType = ST_Catch; invalidatePreds = true; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n", XTnum, exitBlock->bbNum); } #endif } } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* We are jumping out of a finally-protected try */ BasicBlock* callBlock; if (step == nullptr) { #if FEATURE_EH_CALLFINALLY_THUNKS // Put the call to the finally in the enclosing region. unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block); // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. 
block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = callBlock; block->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n", XTnum, block->bbNum, callBlock->bbNum); } #endif #else // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_CALLFINALLY block\n", XTnum, callBlock->bbNum); } #endif #endif // !FEATURE_EH_CALLFINALLY_THUNKS } else { // Calling the finally block. We already have a step block that is either the call-to-finally from a // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by // a 'finally'), or the step block is the return from a catch. // // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will // automatically re-raise the exception, using the return address of the catch (that is, the target // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64, // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly // within the 'try' region protected by the finally, since we generate code in such a way that execution // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on // stack walks.) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS if (step->bbJumpKind == BBJ_EHCATCHRET) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = step2; step->bbJumpDest->bbRefs++; step2->inheritWeight(block); step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is " "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n", XTnum, step->bbNum, step2->bbNum); } #endif step = step2; assert(stepType == ST_Catch); // Leave it as catch type for now. } #endif // FEATURE_EH_CALLFINALLY_THUNKS #if FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 
0 : HBtab->ebdEnclosingHndIndex + 1; #else // !FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = XTnum + 1; unsigned callFinallyHndIndex = 0; // don't care #endif // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY " "block " FMT_BB "\n", XTnum, callBlock->bbNum); } #endif } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); stepType = ST_FinallyReturn; /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) " "block " FMT_BB "\n", XTnum, step->bbNum); } #endif callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. invalidatePreds = true; } else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { // We are jumping out of a catch-protected try. // // If we are returning from a call to a finally, then we must have a step block within a try // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the // finally raises an exception), the VM will find this step block, notice that it is in a protected region, // and invoke the appropriate catch. // // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception), // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM, // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target // address of the catch return as the new exception address. That is, the re-raised exception appears to // occur at the catch return address. If this exception return address skips an enclosing try/catch that // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should. // For example: // // try { // try { // // something here raises ThreadAbortException // LEAVE LABEL_1; // no need to stop at LABEL_2 // } catch (Exception) { // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode. // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only // // need to do this transformation if the current EH block is a try/catch that catches // // ThreadAbortException (or one of its parents), however we might not be able to find that // // information, so currently we do it for all catch types. 
// LEAVE LABEL_1; // Convert this to LEAVE LABEL2; // } // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code // } catch (ThreadAbortException) { // } // LABEL_1: // // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# // compiler. if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch)) { BasicBlock* catchStep; assert(step); if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); } else { assert(stepType == ST_Catch); assert(step->bbJumpKind == BBJ_EHCATCHRET); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = catchStep; step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ catchStep->inheritWeight(block); catchStep->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { if (stepType == ST_FinallyReturn) { printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } else { assert(stepType == ST_Catch); printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } } #endif // DEBUG /* This block is the new step */ step = catchStep; stepType = ST_Try; invalidatePreds = true; } } } if (step == nullptr) { block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE " "block " FMT_BB " to BBJ_ALWAYS\n", block->bbNum); } #endif } else { step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) #ifdef DEBUG if (verbose) { printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum); } #endif // Queue up the jump target for importing impImportBlockPending(leaveTarget); } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #endif // FEATURE_EH_FUNCLETS /*****************************************************************************/ // This is called when reimporting a leave block. It resets the JumpKind, // JumpDest, and bbNext to the original values void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) { #if defined(FEATURE_EH_FUNCLETS) // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1) // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0, // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we // create another BBJ_ALWAYS (call it B2). 
In this process B1 gets orphaned and any blocks to which B1 is the // only predecessor are also considered orphans and attempted to be deleted. // // try { // .... // try // { // .... // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1 // } finally { } // } finally { } // OUTSIDE: // // In the above nested try-finally example, we create a step block (call it Bstep) which in branches to a block // where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block. // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To // work around this we will duplicate B0 (call it B0Dup) before reseting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. if (block->bbJumpKind == BBJ_CALLFINALLY) { BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind); dupBlock->bbFlags = block->bbFlags; dupBlock->bbJumpDest = block->bbJumpDest; dupBlock->copyEHRegion(block); dupBlock->bbCatchTyp = block->bbCatchTyp; // Mark this block as // a) not referenced by any other block to make sure that it gets deleted // b) weight zero // c) prevent from being imported // d) as internal // e) as rarely run dupBlock->bbRefs = 0; dupBlock->bbWeight = BB_ZERO_WEIGHT; dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY; // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS // will be next to each other. fgInsertBBafter(block, dupBlock); #ifdef DEBUG if (verbose) { printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum); } #endif } #endif // FEATURE_EH_FUNCLETS block->bbJumpKind = BBJ_LEAVE; fgInitBBLookup(); block->bbJumpDest = fgLookupBB(jmpAddr); // We will leave the BBJ_ALWAYS block we introduced. When it's reimported // the BBJ_ALWAYS block will be unreachable, and will be removed after. The // reason we don't want to remove the block at this point is that if we call // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be // added and the linked list length will be different than fgBBcount. } /*****************************************************************************/ // Get the first non-prefix opcode. Used for verification of valid combinations // of prefixes and actual opcodes. OPCODE Compiler::impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp) { while (codeAddr < codeEndp) { OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); if (opcode == CEE_PREFIX1) { if (codeAddr >= codeEndp) { break; } opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); codeAddr += sizeof(__int8); } switch (opcode) { case CEE_UNALIGNED: case CEE_VOLATILE: case CEE_TAILCALL: case CEE_CONSTRAINED: case CEE_READONLY: break; default: return opcode; } codeAddr += opcodeSizes[opcode]; } return CEE_ILLEGAL; } /*****************************************************************************/ // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes void Compiler::impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix) { OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!( // Opcode of all ldind and stdind happen to be in continuous, except stind.i. 
((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) || (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) || (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) || // volatile. prefix is allowed with the ldsfld and stsfld (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD))))) { BADCODE("Invalid opcode for unaligned. or volatile. prefix"); } } /*****************************************************************************/ #ifdef DEBUG #undef RETURN // undef contracts RETURN macro enum controlFlow_t { NEXT, CALL, RETURN, THROW, BRANCH, COND_BRANCH, BREAK, PHI, META, }; const static controlFlow_t controlFlow[] = { #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow, #include "opcode.def" #undef OPDEF }; #endif // DEBUG /***************************************************************************** * Determine the result type of an arithemetic operation * On 64-bit inserts upcasts when native int is mixed with int32 */ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2) { var_types type = TYP_UNDEF; GenTree* op1 = *pOp1; GenTree* op2 = *pOp2; // Arithemetic operations are generally only allowed with // primitive types, but certain operations are allowed // with byrefs if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF)) { if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF)) { // byref1-byref2 => gives a native int type = TYP_I_IMPL; } else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF)) { // [native] int - byref => gives a native int // // The reason is that it is possible, in managed C++, // to have a tree like this: // // - // / \. // / \. // / \. // / \. // const(h) int addr byref // // <BUGNUM> VSW 318822 </BUGNUM> // // So here we decide to make the resulting type to be a native int. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_I_IMPL; } else { // byref - [native] int => gives a byref assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet())); #ifdef TARGET_64BIT if ((genActualType(op2->TypeGet()) != TYP_I_IMPL)) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } } else if ((oper == GT_ADD) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF)) { // byref + [native] int => gives a byref // (or) // [native] int + byref => gives a byref // only one can be a byref : byref op byref not allowed assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet())); #ifdef TARGET_64BIT if (genActualType(op2->TypeGet()) == TYP_BYREF) { if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? 
TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } #ifdef TARGET_64BIT else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long // we get this because in the IL the long isn't Int64, it's just IntPtr if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } type = TYP_I_IMPL; } #else // 32-bit TARGET else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long type = TYP_LONG; } #endif // TARGET_64BIT else { // int + int => gives an int assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); type = genActualType(op1->gtType); // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT. // Otherwise, turn floats into doubles if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT)) { assert(genActualType(op2->gtType) == TYP_DOUBLE); type = TYP_DOUBLE; } } assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT); return type; } //------------------------------------------------------------------------ // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting // // Arguments: // op1 - value to cast // pResolvedToken - resolved token for type to cast to // isCastClass - true if this is a castclass, false if isinst // // Return Value: // tree representing optimized cast, or null if no optimization possible GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass) { assert(op1->TypeGet() == TYP_REF); // Don't optimize for minopts or debug codegen. if (opts.OptimizationDisabled()) { return nullptr; } // See what we know about the type of the object being cast. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull); if (fromClass != nullptr) { CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass; JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst", isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass), info.compCompHnd->getClassName(toClass)); // Perhaps we know if the cast will succeed or fail. TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass); if (castResult == TypeCompareState::Must) { // Cast will succeed, result is simply op1. JITDUMP("Cast will succeed, optimizing to simply return input\n"); return op1; } else if (castResult == TypeCompareState::MustNot) { // See if we can sharpen exactness by looking for final classes if (!isExact) { isExact = impIsClassExact(fromClass); } // Cast to exact type will fail. 
Handle case where we have // an exact type (that is, fromClass is not a subtype) // and we're not going to throw on failure. if (isExact && !isCastClass) { JITDUMP("Cast will fail, optimizing to return null\n"); GenTree* result = gtNewIconNode(0, TYP_REF); // If the cast was fed by a box, we can remove that too. if (op1->IsBoxedValue()) { JITDUMP("Also removing upstream box\n"); gtTryRemoveBoxUpstreamEffects(op1); } return result; } else if (isExact) { JITDUMP("Not optimizing failing castclass (yet)\n"); } else { JITDUMP("Can't optimize since fromClass is inexact\n"); } } else { JITDUMP("Result of cast unknown, must generate runtime test\n"); } } else { JITDUMP("\nCan't optimize since fromClass is unknown\n"); } return nullptr; } //------------------------------------------------------------------------ // impCastClassOrIsInstToTree: build and import castclass/isinst // // Arguments: // op1 - value to cast // op2 - type handle for type to cast to // pResolvedToken - resolved token from the cast operation // isCastClass - true if this is castclass, false means isinst // // Return Value: // Tree representing the cast // // Notes: // May expand into a series of runtime checks or a helper call. GenTree* Compiler::impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset) { assert(op1->TypeGet() == TYP_REF); // Optimistically assume the jit should expand this as an inline test bool shouldExpandInline = true; // Profitability check. // // Don't bother with inline expansion when jit is trying to // generate code quickly, or the cast is in code that won't run very // often, or the method already is pretty big. if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { // not worth the code expansion if jitting fast or in a rarely run block shouldExpandInline = false; } else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals()) { // not worth creating an untracked local variable shouldExpandInline = false; } else if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && (JitConfig.JitCastProfiling() == 1)) { // Optimizations are enabled but we're still instrumenting (including casts) if (isCastClass && !impIsClassExact(pResolvedToken->hClass)) { // Usually, we make a speculative assumption that it makes sense to expand castclass // even for non-sealed classes, but let's rely on PGO in this specific case shouldExpandInline = false; } } // Pessimistically assume the jit cannot expand this as an inline test bool canExpandInline = false; const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass); // Legality check. // // Not all classclass/isinst operations can be inline expanded. // Check legality only if an inline expansion is desirable. if (shouldExpandInline) { if (isCastClass) { // Jit can only inline expand the normal CHKCASTCLASS helper. canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS); } else { if (helper == CORINFO_HELP_ISINSTANCEOFCLASS) { // If the class is exact, the jit can expand the IsInst check inline. canExpandInline = impIsClassExact(pResolvedToken->hClass); } } } const bool expandInline = canExpandInline && shouldExpandInline; if (!expandInline) { JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst", canExpandInline ? 
"want smaller code or faster jitting" : "inline expansion not legal"); // If we CSE this class handle we prevent assertionProp from making SubType assertions // so instead we force the CSE logic to not consider CSE-ing this class handle. // op2->gtFlags |= GTF_DONT_CSE; GenTreeCall* call = gtNewHelperCallNode(helper, TYP_REF, gtNewCallArgs(op2, op1)); if (impIsCastHelperEligibleForClassProbe(call) && !impIsClassExact(pResolvedToken->hClass)) { ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return call; } JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst"); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2")); GenTree* temp; GenTree* condMT; // // expand the methodtable match: // // condMT ==> GT_NE // / \. // GT_IND op2 (typically CNS_INT) // | // op1Copy // // This can replace op1 with a GT_COMMA that evaluates op1 into a local // op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1")); // // op1 is now known to be a non-complex tree // thus we can use gtClone(op1) from now on // GenTree* op2Var = op2; if (isCastClass) { op2Var = fgInsertCommaFormTemp(&op2); lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true; } temp = gtNewMethodTableLookup(temp); condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2); GenTree* condNull; // // expand the null check: // // condNull ==> GT_EQ // / \. // op1Copy CNS_INT // null // condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF)); // // expand the true and false trees for the condMT // GenTree* condFalse = gtClone(op1); GenTree* condTrue; if (isCastClass) { // // use the special helper that skips the cases checked by our inlined cast // const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL; condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewCallArgs(op2Var, gtClone(op1))); } else { condTrue = gtNewIconNode(0, TYP_REF); } GenTree* qmarkMT; // // Generate first QMARK - COLON tree // // qmarkMT ==> GT_QMARK // / \. // condMT GT_COLON // / \. // condFalse condTrue // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse); qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp->AsColon()); if (isCastClass && impIsClassExact(pResolvedToken->hClass) && condTrue->OperIs(GT_CALL)) { // condTrue is used only for throwing InvalidCastException in case of casting to an exact class. condTrue->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; } GenTree* qmarkNull; // // Generate second QMARK - COLON tree // // qmarkNull ==> GT_QMARK // / \. // condNull GT_COLON // / \. // qmarkMT op1Copy // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT); qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp->AsColon()); qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF; // Make QMark node a top level node by spilling it. unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2")); impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE); // TODO-CQ: Is it possible op1 has a better type? // // See also gtGetHelperCallClassHandle where we make the same // determination for the helper call variants. 
LclVarDsc* lclDsc = lvaGetDesc(tmp); assert(lclDsc->lvSingleDef == 0); lclDsc->lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); lvaSetClass(tmp, pResolvedToken->hClass); return gtNewLclvNode(tmp, TYP_REF); } #ifndef DEBUG #define assertImp(cond) ((void)0) #else #define assertImp(cond) \ do \ { \ if (!(cond)) \ { \ const int cchAssertImpBuf = 600; \ char* assertImpBuf = (char*)_alloca(cchAssertImpBuf); \ _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \ "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \ impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \ op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \ assertAbort(assertImpBuf, __FILE__, __LINE__); \ } \ } while (0) #endif // DEBUG //------------------------------------------------------------------------ // impBlockIsInALoop: check if a block might be in a loop // // Arguments: // block - block to check // // Returns: // true if the block might be in a loop. // // Notes: // Conservatively correct; may return true for some blocks that are // not actually in loops. // bool Compiler::impBlockIsInALoop(BasicBlock* block) { return (compIsForInlining() && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) || ((block->bbFlags & BBF_BACKWARD_JUMP) != 0); } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif /***************************************************************************** * Import the instr for the given basic block */ void Compiler::impImportBlockCode(BasicBlock* block) { #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind) #ifdef DEBUG if (verbose) { printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName); } #endif unsigned nxtStmtIndex = impInitBlockLineInfo(); IL_OFFSET nxtStmtOffs; CorInfoHelpFunc helper; CorInfoIsAccessAllowedResult accessAllowedResult; CORINFO_HELPER_DESC calloutHelper; const BYTE* lastLoadToken = nullptr; /* Get the tree list started */ impBeginTreeList(); #ifdef FEATURE_ON_STACK_REPLACEMENT bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0); #ifdef DEBUG // Optionally suppress patchpoints by method hash // static ConfigMethodRange JitEnablePatchpointRange; JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); const bool inRange = JitEnablePatchpointRange.Contains(hash); enablePatchpoints &= inRange; #endif // DEBUG if (enablePatchpoints) { // We don't inline at Tier0, if we do, we may need rethink our approach. // Could probably support inlines that don't introduce flow. // assert(!compIsForInlining()); // OSR is not yet supported for methods with explicit tail calls. // // But we also do not have to switch these methods to be optimized as we should be // able to avoid getting trapped in Tier0 code by normal call counting. // So instead, just suppress adding patchpoints. // if (!compTailPrefixSeen) { // The normaly policy is only to add patchpoints to the targets of lexically // backwards branches. // if (compHasBackwardJump) { assert(compCanHavePatchpoints()); // Is the start of this block a suitable patchpoint? // if (((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) && (verCurrentState.esStackDepth == 0)) { // We should have noted this earlier and bailed out of OSR. 
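                    // Patchpoints are only placed at stack-empty IL offsets outside handler
                    // regions, since an OSR transition captures live locals but not evaluation
                    // stack entries and cannot resume inside a handler.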
// assert(!block->hasHndIndex()); block->bbFlags |= BBF_PATCHPOINT; setMethodHasPatchpoint(); } } else { // Should not see backward branch targets w/o backwards branches assert((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == 0); } } #ifdef DEBUG // As a stress test, we can place patchpoints at the start of any block // that is a stack empty point and is not within a handler. // // Todo: enable for mid-block stack empty points too. // const int offsetOSR = JitConfig.JitOffsetOnStackReplacement(); const int randomOSR = JitConfig.JitRandomOnStackReplacement(); const bool tryOffsetOSR = offsetOSR >= 0; const bool tryRandomOSR = randomOSR > 0; if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0)) { // Block start can have a patchpoint. See if we should add one. // bool addPatchpoint = false; // Specific offset? // if (tryOffsetOSR) { if (impCurOpcOffs == (unsigned)offsetOSR) { addPatchpoint = true; } } // Random? // else { // Reuse the random inliner's random state. // Note m_inlineStrategy is always created, even if we're not inlining. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(randomOSR); const int randomValue = (int)random->Next(100); addPatchpoint = (randomValue < randomOSR); } if (addPatchpoint) { block->bbFlags |= BBF_PATCHPOINT; setMethodHasPatchpoint(); } JITDUMP("\n** %s patchpoint%s added to " FMT_BB " (il offset %u)\n", tryOffsetOSR ? "offset" : "random", addPatchpoint ? "" : " not", block->bbNum, impCurOpcOffs); } #endif // DEBUG } // Mark stack-empty rare blocks to be considered for partial compilation. // // Ideally these are conditionally executed blocks -- if the method is going // to unconditionally throw, there's not as much to be gained by deferring jitting. // For now, we just screen out the entry bb. // // In general we might want track all the IL stack empty points so we can // propagate rareness back through flow and place the partial compilation patchpoints "earlier" // so there are fewer overall. // // Note unlike OSR, it's ok to forgo these. // // Todo: stress mode... // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) && compCanHavePatchpoints() && !compTailPrefixSeen) { // Is this block a good place for partial compilation? // if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) && ((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex()) { JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum); block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT; setMethodHasPartialCompilationPatchpoint(); // Change block to BBJ_THROW so we won't trigger importation of successors. // block->bbJumpKind = BBJ_THROW; // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. 
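            // Being conservative here only keeps the generics context reported as live;
            // since this block will not be imported, a genuine use could otherwise be missed.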
// if (info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE)) { lvaGenericsContextInUse = true; } return; } } #endif // FEATURE_ON_STACK_REPLACEMENT /* Walk the opcodes that comprise the basic block */ const BYTE* codeAddr = info.compCode + block->bbCodeOffs; const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd; IL_OFFSET opcodeOffs = block->bbCodeOffs; IL_OFFSET lastSpillOffs = opcodeOffs; signed jmpDist; /* remember the start of the delegate creation sequence (used for verification) */ const BYTE* delegateCreateStart = nullptr; int prefixFlags = 0; bool explicitTailCall, constraintCall, readonlyCall; typeInfo tiRetVal; unsigned numArgs = info.compArgsCount; /* Now process all the opcodes in the block */ var_types callTyp = TYP_COUNT; OPCODE prevOpcode = CEE_ILLEGAL; if (block->bbCatchTyp) { if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { impCurStmtOffsSet(block->bbCodeOffs); } // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block // to a temp. This is a trade off for code simplicity impSpillSpecialSideEff(); } while (codeAddr < codeEndp) { #ifdef FEATURE_READYTORUN bool usingReadyToRunHelper = false; #endif CORINFO_RESOLVED_TOKEN resolvedToken; CORINFO_RESOLVED_TOKEN constrainedResolvedToken; CORINFO_CALL_INFO callInfo; CORINFO_FIELD_INFO fieldInfo; tiRetVal = typeInfo(); // Default type info //--------------------------------------------------------------------- /* We need to restrict the max tree depth as many of the Compiler functions are recursive. We do this by spilling the stack */ if (verCurrentState.esStackDepth) { /* Has it been a while since we last saw a non-empty stack (which guarantees that the tree depth isnt accumulating. */ if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode)) { impSpillStackEnsure(); lastSpillOffs = opcodeOffs; } } else { lastSpillOffs = opcodeOffs; impBoxTempInUse = false; // nothing on the stack, box temp OK to use again } /* Compute the current instr offset */ opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); #ifndef DEBUG if (opts.compDbgInfo) #endif { nxtStmtOffs = (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET; /* Have we reached the next stmt boundary ? */ if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs) { assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]); if (verCurrentState.esStackDepth != 0 && opts.compDbgCode) { /* We need to provide accurate IP-mapping at this point. So spill anything on the stack so that it will form gtStmts with the correct stmt offset noted */ impSpillStackEnsure(true); } // Have we reported debug info for any tree? if (impCurStmtDI.IsValid() && opts.compDbgCode) { GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); assert(!impCurStmtDI.IsValid()); } if (!impCurStmtDI.IsValid()) { /* Make sure that nxtStmtIndex is in sync with opcodeOffs. If opcodeOffs has gone past nxtStmtIndex, catch up */ while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount && info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs) { nxtStmtIndex++; } /* Go to the new stmt */ impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]); /* Update the stmt boundary index */ nxtStmtIndex++; assert(nxtStmtIndex <= info.compStmtOffsetsCount); /* Are there any more line# entries after this one? 
*/ if (nxtStmtIndex < info.compStmtOffsetsCount) { /* Remember where the next line# starts */ nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex]; } else { /* No more line# entries */ nxtStmtOffs = BAD_IL_OFFSET; } } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) && (verCurrentState.esStackDepth == 0)) { /* At stack-empty locations, we have already added the tree to the stmt list with the last offset. We just need to update impCurStmtDI */ impCurStmtOffsSet(opcodeOffs); } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) && impOpcodeIsCallSiteBoundary(prevOpcode)) { /* Make sure we have a type cached */ assert(callTyp != TYP_COUNT); if (callTyp == TYP_VOID) { impCurStmtOffsSet(opcodeOffs); } else if (opts.compDbgCode) { impSpillStackEnsure(true); impCurStmtOffsSet(opcodeOffs); } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP)) { if (opts.compDbgCode) { impSpillStackEnsure(true); } impCurStmtOffsSet(opcodeOffs); } assert(!impCurStmtDI.IsValid() || (nxtStmtOffs == BAD_IL_OFFSET) || (impCurStmtDI.GetLocation().GetOffset() <= nxtStmtOffs)); } CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL); var_types lclTyp, ovflType = TYP_UNKNOWN; GenTree* op1 = DUMMY_INIT(NULL); GenTree* op2 = DUMMY_INIT(NULL); GenTree* newObjThisPtr = DUMMY_INIT(NULL); bool uns = DUMMY_INIT(false); bool isLocal = false; /* Get the next opcode and the size of its parameters */ OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); #ifdef DEBUG impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs); #endif DECODE_OPCODE: // Return if any previous code has caused inline to fail. if (compDonotInline()) { return; } /* Get the size of additional parameters */ signed int sz = opcodeSizes[opcode]; #ifdef DEBUG clsHnd = NO_CLASS_HANDLE; lclTyp = TYP_COUNT; callTyp = TYP_COUNT; impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); impCurOpcName = opcodeNames[opcode]; if (verbose && (opcode != CEE_PREFIX1)) { printf("%s", impCurOpcName); } /* Use assertImp() to display the opcode */ op1 = op2 = nullptr; #endif /* See what kind of an opcode we have, then */ unsigned mflags = 0; unsigned clsFlags = 0; switch (opcode) { unsigned lclNum; var_types type; GenTree* op3; genTreeOps oper; unsigned size; int val; CORINFO_SIG_INFO sig; IL_OFFSET jmpAddr; bool ovfl, unordered, callNode; bool ldstruct; CORINFO_CLASS_HANDLE tokenType; union { int intVal; float fltVal; __int64 lngVal; double dblVal; } cval; case CEE_PREFIX1: opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; SPILL_APPEND: // We need to call impSpillLclRefs() for a struct type lclVar. // This is because there may be loads of that lclVar on the evaluation stack, and // we need to ensure that those loads are completed before we modify it. 
if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtGetOp1())) { GenTree* lhs = op1->gtGetOp1(); GenTreeLclVarCommon* lclVar = nullptr; if (lhs->gtOper == GT_LCL_VAR) { lclVar = lhs->AsLclVarCommon(); } else if (lhs->OperIsBlk()) { // Check if LHS address is within some struct local, to catch // cases where we're updating the struct by something other than a stfld GenTree* addr = lhs->AsBlk()->Addr(); // Catches ADDR(LCL_VAR), or ADD(ADDR(LCL_VAR),CNS_INT)) lclVar = addr->IsLocalAddrExpr(); // Catches ADDR(FIELD(... ADDR(LCL_VAR))) if (lclVar == nullptr) { GenTree* lclTree = nullptr; if (impIsAddressInLocal(addr, &lclTree)) { lclVar = lclTree->AsLclVarCommon(); } } } if (lclVar != nullptr) { impSpillLclRefs(lclVar->GetLclNum()); } } /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); goto DONE_APPEND; APPEND: /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); goto DONE_APPEND; DONE_APPEND: #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif break; case CEE_LDNULL: impPushNullObjRefOnStack(); break; case CEE_LDC_I4_M1: case CEE_LDC_I4_0: case CEE_LDC_I4_1: case CEE_LDC_I4_2: case CEE_LDC_I4_3: case CEE_LDC_I4_4: case CEE_LDC_I4_5: case CEE_LDC_I4_6: case CEE_LDC_I4_7: case CEE_LDC_I4_8: cval.intVal = (opcode - CEE_LDC_I4_0); assert(-1 <= cval.intVal && cval.intVal <= 8); goto PUSH_I4CON; case CEE_LDC_I4_S: cval.intVal = getI1LittleEndian(codeAddr); goto PUSH_I4CON; case CEE_LDC_I4: cval.intVal = getI4LittleEndian(codeAddr); goto PUSH_I4CON; PUSH_I4CON: JITDUMP(" %d", cval.intVal); impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT)); break; case CEE_LDC_I8: cval.lngVal = getI8LittleEndian(codeAddr); JITDUMP(" 0x%016llx", cval.lngVal); impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG)); break; case CEE_LDC_R8: cval.dblVal = getR8LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE)); break; case CEE_LDC_R4: cval.dblVal = getR4LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal, TYP_FLOAT), typeInfo(TI_DOUBLE)); break; case CEE_LDSTR: val = getU4LittleEndian(codeAddr); JITDUMP(" %08X", val); impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal); break; case CEE_LDARG: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: lclNum = (opcode - CEE_LDARG_0); assert(lclNum >= 0 && lclNum < 4); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_0: case CEE_LDLOC_1: case CEE_LDLOC_2: case CEE_LDLOC_3: lclNum = (opcode - CEE_LDLOC_0); assert(lclNum >= 0 && lclNum < 4); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_STARG: lclNum = getU2LittleEndian(codeAddr); goto STARG; case CEE_STARG_S: lclNum = getU1LittleEndian(codeAddr); STARG: JITDUMP(" %u", lclNum); if (compIsForInlining()) { op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); 
noway_assert(op1->gtOper == GT_LCL_VAR); lclNum = op1->AsLclVar()->GetLclNum(); goto VAR_ST_VALID; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } // We should have seen this arg write in the prescan assert(lvaTable[lclNum].lvHasILStoreOp); goto VAR_ST; case CEE_STLOC: lclNum = getU2LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_S: lclNum = getU1LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: isLocal = true; lclNum = (opcode - CEE_STLOC_0); assert(lclNum >= 0 && lclNum < 4); LOC_ST: if (compIsForInlining()) { lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp")); goto _PopValue; } lclNum += numArgs; VAR_ST: if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var) { BADCODE("Bad IL"); } VAR_ST_VALID: /* if it is a struct assignment, make certain we don't overflow the buffer */ assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd)); if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } _PopValue: /* Pop the value being assigned */ { StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; tiRetVal = se.seTypeInfo; } #ifdef FEATURE_SIMD if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet())) { assert(op1->TypeGet() == TYP_STRUCT); op1->gtType = lclTyp; } #endif // FEATURE_SIMD op1 = impImplicitIorI4Cast(op1, lclTyp); #ifdef TARGET_64BIT // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT)) { op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT); } #endif // TARGET_64BIT // We had better assign it a value of the correct type assertImp( genActualType(lclTyp) == genActualType(op1->gtType) || (genActualType(lclTyp) == TYP_I_IMPL && op1->IsLocalAddrExpr() != nullptr) || (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) || (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) || (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) || ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF)); /* If op1 is "&var" then its type is the transient "*" and it can be used either as TYP_BYREF or TYP_I_IMPL */ if (op1->IsLocalAddrExpr() != nullptr) { assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF); /* When "&var" is created, we assume it is a byref. If it is being assigned to a TYP_I_IMPL var, change the type to prevent unnecessary GC info */ if (genActualType(lclTyp) == TYP_I_IMPL) { op1->gtType = TYP_I_IMPL; } } // If this is a local and the local is a ref type, see // if we can improve type information based on the // value being assigned. if (isLocal && (lclTyp == TYP_REF)) { // We should have seen a stloc in our IL prescan. assert(lvaTable[lclNum].lvHasILStoreOp); // Is there just one place this local is defined? const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef; // Conservative check that there is just one // definition that reaches this store. 
const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0); if (isSingleDefLocal && hasSingleReachingDef) { lvaUpdateClass(lclNum, op1, clsHnd); } } /* Filter out simple assignments to itself */ if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum()) { if (opts.compDbgCode) { op1 = gtNewNothingNode(); goto SPILL_APPEND; } else { break; } } /* Create the assignment node */ op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1)); /* If the local is aliased or pinned, we need to spill calls and indirections from the stack. */ if ((lvaTable[lclNum].IsAddressExposed() || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) && (verCurrentState.esStackDepth > 0)) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned")); } /* Spill any refs to the local from the stack */ impSpillLclRefs(lclNum); // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op2' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet()); } if (varTypeIsStruct(lclTyp)) { op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL); } else { op1 = gtNewAssignNode(op2, op1); } goto SPILL_APPEND; case CEE_LDLOCA: lclNum = getU2LittleEndian(codeAddr); goto LDLOCA; case CEE_LDLOCA_S: lclNum = getU1LittleEndian(codeAddr); LDLOCA: JITDUMP(" %u", lclNum); if (compIsForInlining()) { // Get the local type lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp")); assert(!lvaGetDesc(lclNum)->lvNormalizeOnLoad()); op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum)); goto _PUSH_ADRVAR; } lclNum += numArgs; assertImp(lclNum < info.compLocalsCount); goto ADRVAR; case CEE_LDARGA: lclNum = getU2LittleEndian(codeAddr); goto LDARGA; case CEE_LDARGA_S: lclNum = getU1LittleEndian(codeAddr); LDARGA: JITDUMP(" %u", lclNum); Verify(lclNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument, // followed by a ldfld to load the field. op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); if (op1->gtOper != GT_LCL_VAR) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR); return; } assert(op1->gtOper == GT_LCL_VAR); goto _PUSH_ADRVAR; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } goto ADRVAR; ADRVAR: op1 = impCreateLocalNode(lclNum DEBUGARG(opcodeOffs + sz + 1)); _PUSH_ADRVAR: assert(op1->gtOper == GT_LCL_VAR); /* Note that this is supposed to create the transient type "*" which may be used as a TYP_I_IMPL. However we catch places where it is used as a TYP_I_IMPL and change the node if needed. Thus we are pessimistic and may report byrefs in the GC info where it was not absolutely needed, but it is safer this way. 
*/ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); // &aliasedVar doesnt need GTF_GLOB_REF, though alisasedVar does assert((op1->gtFlags & GTF_GLOB_REF) == 0); tiRetVal = lvaTable[lclNum].lvVerTypeInfo; impPushOnStack(op1, tiRetVal); break; case CEE_ARGLIST: if (!info.compIsVarArgs) { BADCODE("arglist in non-vararg method"); } assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG); /* The ARGLIST cookie is a hidden 'last' parameter, we have already adjusted the arg count cos this is like fetching the last param */ assertImp(0 < numArgs); lclNum = lvaVarargsHandleArg; op1 = gtNewLclvNode(lclNum, TYP_I_IMPL DEBUGARG(opcodeOffs + sz + 1)); op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); impPushOnStack(op1, tiRetVal); break; case CEE_ENDFINALLY: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY); return; } if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } if (info.compXcptnsCount == 0) { BADCODE("endfinally outside finally"); } assert(verCurrentState.esStackDepth == 0); op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr); goto APPEND; case CEE_ENDFILTER: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER); return; } block->bbSetRunRarely(); // filters are rare if (info.compXcptnsCount == 0) { BADCODE("endfilter outside filter"); } op1 = impPopStack().val; assertImp(op1->gtType == TYP_INT); if (!bbInFilterILRange(block)) { BADCODE("EndFilter outside a filter handler"); } /* Mark current bb as end of filter */ assert(compCurBB->bbFlags & BBF_DONT_REMOVE); assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET); /* Mark catch handler as successor */ op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1); if (verCurrentState.esStackDepth != 0) { verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__) DEBUGARG(__LINE__)); } goto APPEND; case CEE_RET: prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it RET: if (!impReturnInstruction(prefixFlags, opcode)) { return; // abort } else { break; } case CEE_JMP: assert(!compIsForInlining()); if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex()) { /* CEE_JMP does not make sense in some "protected" regions. */ BADCODE("Jmp not allowed in protected region"); } if (opts.IsReversePInvoke()) { BADCODE("Jmp not allowed in reverse P/Invoke"); } if (verCurrentState.esStackDepth != 0) { BADCODE("Stack must be empty after CEE_JMPs"); } _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); /* The signature of the target has to be identical to ours. 
At least check that argCnt and returnType match */ eeGetMethodSig(resolvedToken.hMethod, &sig); if (sig.numArgs != info.compMethodInfo->args.numArgs || sig.retType != info.compMethodInfo->args.retType || sig.callConv != info.compMethodInfo->args.callConv) { BADCODE("Incompatible target for CEE_JMPs"); } op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod); /* Mark the basic block as being a JUMP instead of RETURN */ block->bbFlags |= BBF_HAS_JMP; /* Set this flag to make sure register arguments have a location assigned * even if we don't use them inside the method */ compJmpOpUsed = true; fgNoStructPromotion = true; goto APPEND; case CEE_LDELEMA: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a value class array we just do a simple address-of if (eeIsValueClass(ldelemClsHnd)) { CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd); if (cit == CORINFO_TYPE_UNDEF) { lclTyp = TYP_STRUCT; } else { lclTyp = JITtype2varType(cit); } goto ARR_LD_POST_VERIFY; } // Similarly, if its a readonly access, we can do a simple address-of // without doing a runtime type-check if (prefixFlags & PREFIX_READONLY) { lclTyp = TYP_REF; goto ARR_LD_POST_VERIFY; } // Otherwise we need the full helper function with run-time type check op1 = impTokenToHandle(&resolvedToken); if (op1 == nullptr) { // compDonotInline() return; } { GenTreeCall::Use* args = gtNewCallArgs(op1); // Type args = gtPrependNewCallArg(impPopStack().val, args); // index args = gtPrependNewCallArg(impPopStack().val, args); // array op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args); } impPushOnStack(op1, tiRetVal); break; // ldelem for reference and value types case CEE_LDELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a reference type or generic variable type // then just generate code as though it's a ldelem.ref instruction if (!eeIsValueClass(ldelemClsHnd)) { lclTyp = TYP_REF; opcode = CEE_LDELEM_REF; } else { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd); lclTyp = JITtype2varType(jitTyp); tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct tiRetVal.NormaliseForStack(); } goto ARR_LD_POST_VERIFY; case CEE_LDELEM_I1: lclTyp = TYP_BYTE; goto ARR_LD; case CEE_LDELEM_I2: lclTyp = TYP_SHORT; goto ARR_LD; case CEE_LDELEM_I: lclTyp = TYP_I_IMPL; goto ARR_LD; // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter // and treating it as TYP_INT avoids other asserts. 
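// The remaining ldelem.* forms share the ARR_LD path below; they differ
// only in the element type selected here.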
case CEE_LDELEM_U4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I8: lclTyp = TYP_LONG; goto ARR_LD; case CEE_LDELEM_REF: lclTyp = TYP_REF; goto ARR_LD; case CEE_LDELEM_R4: lclTyp = TYP_FLOAT; goto ARR_LD; case CEE_LDELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_LD; case CEE_LDELEM_U1: lclTyp = TYP_UBYTE; goto ARR_LD; case CEE_LDELEM_U2: lclTyp = TYP_USHORT; goto ARR_LD; ARR_LD: ARR_LD_POST_VERIFY: /* Pull the index value and array address */ op2 = impPopStack().val; op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); /* Check for null pointer - in the inliner case we simply abort */ if (compIsForInlining()) { if (op1->gtOper == GT_CNS_INT) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM); return; } } /* Mark the block as containing an index expression */ if (op1->gtOper == GT_LCL_VAR) { if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node and push it on the stack */ op1 = gtNewIndexRef(lclTyp, op1, op2); ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT); if ((opcode == CEE_LDELEMA) || ldstruct || (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd))) { assert(ldelemClsHnd != DUMMY_INIT(NULL)); // remember the element size if (lclTyp == TYP_REF) { op1->AsIndex()->gtIndElemSize = TARGET_POINTER_SIZE; } else { // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type. if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF) { op1->AsIndex()->gtStructElemClass = ldelemClsHnd; } assert(lclTyp != TYP_STRUCT || op1->AsIndex()->gtStructElemClass != nullptr); if (lclTyp == TYP_STRUCT) { size = info.compCompHnd->getClassSize(ldelemClsHnd); op1->AsIndex()->gtIndElemSize = size; op1->gtType = lclTyp; } } if ((opcode == CEE_LDELEMA) || ldstruct) { // wrap it in a & lclTyp = TYP_BYREF; op1 = gtNewOperNode(GT_ADDR, lclTyp, op1); } else { assert(lclTyp != TYP_STRUCT); } } if (ldstruct) { // Create an OBJ for the result op1 = gtNewObjNode(ldelemClsHnd, op1); op1->gtFlags |= GTF_EXCEPT; } impPushOnStack(op1, tiRetVal); break; // stelem for reference and value types case CEE_STELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); stelemClsHnd = resolvedToken.hClass; // If it's a reference type just behave as though it's a stelem.ref instruction if (!eeIsValueClass(stelemClsHnd)) { goto STELEM_REF_POST_VERIFY; } // Otherwise extract the type { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd); lclTyp = JITtype2varType(jitTyp); goto ARR_ST_POST_VERIFY; } case CEE_STELEM_REF: STELEM_REF_POST_VERIFY: if (opts.OptimizationEnabled()) { GenTree* array = impStackTop(2).val; GenTree* value = impStackTop().val; // Is this a case where we can skip the covariant store check? 
if (impCanSkipCovariantStoreCheck(value, array)) { lclTyp = TYP_REF; goto ARR_ST_POST_VERIFY; } } // Else call a helper function to do the assignment op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopCallArgs(3, nullptr)); goto SPILL_APPEND; case CEE_STELEM_I1: lclTyp = TYP_BYTE; goto ARR_ST; case CEE_STELEM_I2: lclTyp = TYP_SHORT; goto ARR_ST; case CEE_STELEM_I: lclTyp = TYP_I_IMPL; goto ARR_ST; case CEE_STELEM_I4: lclTyp = TYP_INT; goto ARR_ST; case CEE_STELEM_I8: lclTyp = TYP_LONG; goto ARR_ST; case CEE_STELEM_R4: lclTyp = TYP_FLOAT; goto ARR_ST; case CEE_STELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_ST; ARR_ST: ARR_ST_POST_VERIFY: /* The strict order of evaluation is LHS-operands, RHS-operands, range-check, and then assignment. However, codegen currently does the range-check before evaluation the RHS-operands. So to maintain strict ordering, we spill the stack. */ if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Strict ordering of exceptions for Array store")); } /* Pull the new value from the stack */ op2 = impPopStack().val; /* Pull the index value */ op1 = impPopStack().val; /* Pull the array address */ op3 = impPopStack().val; assertImp(op3->gtType == TYP_REF); if (op2->IsLocalAddrExpr() != nullptr) { op2->gtType = TYP_I_IMPL; } // Mark the block as containing an index expression if (op3->gtOper == GT_LCL_VAR) { if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node */ op1 = gtNewIndexRef(lclTyp, op3, op1); /* Create the assignment node and append it */ if (lclTyp == TYP_STRUCT) { assert(stelemClsHnd != DUMMY_INIT(NULL)); op1->AsIndex()->gtStructElemClass = stelemClsHnd; op1->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd); } if (varTypeIsStruct(op1)) { op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL); } else { op2 = impImplicitR4orR8Cast(op2, op1->TypeGet()); op1 = gtNewAssignNode(op1, op2); } /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; goto SPILL_APPEND; case CEE_ADD: oper = GT_ADD; goto MATH_OP2; case CEE_ADD_OVF: uns = false; goto ADD_OVF; case CEE_ADD_OVF_UN: uns = true; goto ADD_OVF; ADD_OVF: ovfl = true; callNode = false; oper = GT_ADD; goto MATH_OP2_FLAGS; case CEE_SUB: oper = GT_SUB; goto MATH_OP2; case CEE_SUB_OVF: uns = false; goto SUB_OVF; case CEE_SUB_OVF_UN: uns = true; goto SUB_OVF; SUB_OVF: ovfl = true; callNode = false; oper = GT_SUB; goto MATH_OP2_FLAGS; case CEE_MUL: oper = GT_MUL; goto MATH_MAYBE_CALL_NO_OVF; case CEE_MUL_OVF: uns = false; goto MUL_OVF; case CEE_MUL_OVF_UN: uns = true; goto MUL_OVF; MUL_OVF: ovfl = true; oper = GT_MUL; goto MATH_MAYBE_CALL_OVF; // Other binary math operations case CEE_DIV: oper = GT_DIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_DIV_UN: oper = GT_UDIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM: oper = GT_MOD; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM_UN: oper = GT_UMOD; goto MATH_MAYBE_CALL_NO_OVF; MATH_MAYBE_CALL_NO_OVF: ovfl = false; MATH_MAYBE_CALL_OVF: // Morpher has some complex logic about when to turn different // typed nodes on different platforms into helper calls. We // need to either duplicate that logic here, or just // pessimistically make all the nodes large enough to become // call nodes. Since call nodes aren't that much larger and // these opcodes are infrequent enough I chose the latter. 
callNode = true; goto MATH_OP2_FLAGS; case CEE_AND: oper = GT_AND; goto MATH_OP2; case CEE_OR: oper = GT_OR; goto MATH_OP2; case CEE_XOR: oper = GT_XOR; goto MATH_OP2; MATH_OP2: // For default values of 'ovfl' and 'callNode' ovfl = false; callNode = false; MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set /* Pull two values and push back the result */ op2 = impPopStack().val; op1 = impPopStack().val; /* Can't do arithmetic with references */ assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF); // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if its a true byref, only // if it is in the stack) impBashVarAddrsToI(op1, op2); type = impGetByRefResultType(oper, uns, &op1, &op2); assert(!ovfl || !varTypeIsFloating(op1->gtType)); /* Special case: "int+0", "int-0", "int*1", "int/1" */ if (op2->gtOper == GT_CNS_INT) { if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) || (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV))) { impPushOnStack(op1, tiRetVal); break; } } // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand // if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { if (op1->TypeGet() != type) { // We insert a cast of op1 to 'type' op1 = gtNewCastNode(type, op1, false, type); } if (op2->TypeGet() != type) { // We insert a cast of op2 to 'type' op2 = gtNewCastNode(type, op2, false, type); } } if (callNode) { /* These operators can later be transformed into 'GT_CALL' */ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]); #ifndef TARGET_ARM assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]); #endif // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying // that we'll need to transform into a general large node, but rather specifically // to a call: by doing it this way, things keep working if there are multiple sizes, // and a CALL is no longer the largest. // That said, as of now it *is* a large node, so we'll do this with an assert rather // than an "if". 
assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE); op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true)); } else { op1 = gtNewOperNode(oper, type, op1, op2); } /* Special case: integer/long division may throw an exception */ if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this)) { op1->gtFlags |= GTF_EXCEPT; } if (ovfl) { assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL); if (ovflType != TYP_UNKNOWN) { op1->gtType = ovflType; } op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } } impPushOnStack(op1, tiRetVal); break; case CEE_SHL: oper = GT_LSH; goto CEE_SH_OP2; case CEE_SHR: oper = GT_RSH; goto CEE_SH_OP2; case CEE_SHR_UN: oper = GT_RSZ; goto CEE_SH_OP2; CEE_SH_OP2: op2 = impPopStack().val; op1 = impPopStack().val; // operand to be shifted impBashVarAddrsToI(op1, op2); type = genActualType(op1->TypeGet()); op1 = gtNewOperNode(oper, type, op1, op2); impPushOnStack(op1, tiRetVal); break; case CEE_NOT: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); type = genActualType(op1->TypeGet()); impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal); break; case CEE_CKFINITE: op1 = impPopStack().val; type = op1->TypeGet(); op1 = gtNewOperNode(GT_CKFINITE, type, op1); op1->gtFlags |= GTF_EXCEPT; impPushOnStack(op1, tiRetVal); break; case CEE_LEAVE: val = getI4LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val); goto LEAVE; case CEE_LEAVE_S: val = getI1LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val); LEAVE: if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE); return; } JITDUMP(" %04X", jmpAddr); if (block->bbJumpKind != BBJ_LEAVE) { impResetLeaveBlock(block, jmpAddr); } assert(jmpAddr == block->bbJumpDest->bbCodeOffs); impImportLeave(block); impNoteBranchOffs(); break; case CEE_BR: case CEE_BR_S: jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0) { break; /* NOP */ } impNoteBranchOffs(); break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: /* Pop the comparand (now there's a neat term) from the stack */ op1 = impPopStack().val; type = op1->TypeGet(); // Per Ecma-355, brfalse and brtrue are only specified for nint, ref, and byref. // // We've historically been a bit more permissive, so here we allow // any type that gtNewZeroConNode can handle. if (!varTypeIsArithmetic(type) && !varTypeIsGC(type)) { BADCODE("invalid type for brtrue/brfalse"); } if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { op1 = gtUnusedValNode(op1); goto SPILL_APPEND; } else { break; } } if (op1->OperIsCompare()) { if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S) { // Flip the sense of the compare op1 = gtReverseCond(op1); } } else { // We'll compare against an equally-sized integer 0 // For small types, we always compare against int op2 = gtNewZeroConNode(genActualType(op1->gtType)); // Create the comparison operator and try to fold it oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? 
GT_NE : GT_EQ; op1 = gtNewOperNode(oper, TYP_INT, op1, op2); } // fall through COND_JUMP: /* Fold comparison if we can */ op1 = gtFoldExpr(op1); /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/ /* Don't make any blocks unreachable in import only mode */ if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly()) { /* gtFoldExpr() should prevent this as we don't want to make any blocks unreachable under compDbgCode */ assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); assertImp((block->bbJumpKind == BBJ_COND) // normal case || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the // block for the second time block->bbJumpKind = foldedJumpKind; #ifdef DEBUG if (verbose) { if (op1->AsIntCon()->gtIconVal) { printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n", block->bbJumpDest->bbNum); } else { printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum); } } #endif break; } op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1); /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt' in impImportBlock(block). For correct line numbers, spill stack. */ if (opts.compDbgCode && impCurStmtDI.IsValid()) { impSpillStackEnsure(true); } goto SPILL_APPEND; case CEE_CEQ: oper = GT_EQ; uns = false; goto CMP_2_OPs; case CEE_CGT_UN: oper = GT_GT; uns = true; goto CMP_2_OPs; case CEE_CGT: oper = GT_GT; uns = false; goto CMP_2_OPs; case CEE_CLT_UN: oper = GT_LT; uns = true; goto CMP_2_OPs; case CEE_CLT: oper = GT_LT; uns = false; goto CMP_2_OPs; CMP_2_OPs: op2 = impPopStack().val; op1 = impPopStack().val; // Recognize the IL idiom of CGT_UN(op1, 0) and normalize // it so that downstream optimizations don't have to. if ((opcode == CEE_CGT_UN) && op2->IsIntegralConst(0)) { oper = GT_NE; uns = false; } #ifdef TARGET_64BIT // TODO-Casts: create a helper that upcasts int32 -> native int when necessary. // See also identical code in impGetByRefResultType and STSFLD import. if (varTypeIsI(op1) && (genActualType(op2) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, TYP_I_IMPL); } else if (varTypeIsI(op2) && (genActualType(op1) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1) == genActualType(op2) || (varTypeIsI(op1) && varTypeIsI(op2)) || (varTypeIsFloating(op1) && varTypeIsFloating(op2))); // Create the comparison node. op1 = gtNewOperNode(oper, TYP_INT, op1, op2); // TODO: setting both flags when only one is appropriate. if (uns) { op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED; } // Fold result, if possible. 
op1 = gtFoldExpr(op1); impPushOnStack(op1, tiRetVal); break; case CEE_BEQ_S: case CEE_BEQ: oper = GT_EQ; goto CMP_2_OPs_AND_BR; case CEE_BGE_S: case CEE_BGE: oper = GT_GE; goto CMP_2_OPs_AND_BR; case CEE_BGE_UN_S: case CEE_BGE_UN: oper = GT_GE; goto CMP_2_OPs_AND_BR_UN; case CEE_BGT_S: case CEE_BGT: oper = GT_GT; goto CMP_2_OPs_AND_BR; case CEE_BGT_UN_S: case CEE_BGT_UN: oper = GT_GT; goto CMP_2_OPs_AND_BR_UN; case CEE_BLE_S: case CEE_BLE: oper = GT_LE; goto CMP_2_OPs_AND_BR; case CEE_BLE_UN_S: case CEE_BLE_UN: oper = GT_LE; goto CMP_2_OPs_AND_BR_UN; case CEE_BLT_S: case CEE_BLT: oper = GT_LT; goto CMP_2_OPs_AND_BR; case CEE_BLT_UN_S: case CEE_BLT_UN: oper = GT_LT; goto CMP_2_OPs_AND_BR_UN; case CEE_BNE_UN_S: case CEE_BNE_UN: oper = GT_NE; goto CMP_2_OPs_AND_BR_UN; CMP_2_OPs_AND_BR_UN: uns = true; unordered = true; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR: uns = false; unordered = false; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR_ALL: /* Pull two values */ op2 = impPopStack().val; op1 = impPopStack().val; #ifdef TARGET_64BIT if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet())) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op1 side effect")); impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } if (op2->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op2 side effect")); impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } #ifdef DEBUG if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT) { impNoteLastILoffs(); } #endif break; } // We can generate an compare of different sized floating point op1 and op2 // We insert a cast // if (varTypeIsFloating(op1->TypeGet())) { if (op1->TypeGet() != op2->TypeGet()) { assert(varTypeIsFloating(op2->TypeGet())); // say op1=double, op2=float. To avoid loss of precision // while comparing, op2 is converted to double and double // comparison is done. 
if (op1->TypeGet() == TYP_DOUBLE) { // We insert a cast of op2 to TYP_DOUBLE op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } else if (op2->TypeGet() == TYP_DOUBLE) { // We insert a cast of op1 to TYP_DOUBLE op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } } } /* Create and append the operator */ op1 = gtNewOperNode(oper, TYP_INT, op1, op2); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } if (unordered) { op1->gtFlags |= GTF_RELOP_NAN_UN; } goto COND_JUMP; case CEE_SWITCH: /* Pop the switch value off the stack */ op1 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op1->TypeGet())); /* We can create a switch node */ op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1); val = (int)getU4LittleEndian(codeAddr); codeAddr += 4 + val * 4; // skip over the switch-table goto SPILL_APPEND; /************************** Casting OPCODES ***************************/ case CEE_CONV_OVF_I1: lclTyp = TYP_BYTE; goto CONV_OVF; case CEE_CONV_OVF_I2: lclTyp = TYP_SHORT; goto CONV_OVF; case CEE_CONV_OVF_I: lclTyp = TYP_I_IMPL; goto CONV_OVF; case CEE_CONV_OVF_I4: lclTyp = TYP_INT; goto CONV_OVF; case CEE_CONV_OVF_I8: lclTyp = TYP_LONG; goto CONV_OVF; case CEE_CONV_OVF_U1: lclTyp = TYP_UBYTE; goto CONV_OVF; case CEE_CONV_OVF_U2: lclTyp = TYP_USHORT; goto CONV_OVF; case CEE_CONV_OVF_U: lclTyp = TYP_U_IMPL; goto CONV_OVF; case CEE_CONV_OVF_U4: lclTyp = TYP_UINT; goto CONV_OVF; case CEE_CONV_OVF_U8: lclTyp = TYP_ULONG; goto CONV_OVF; case CEE_CONV_OVF_I1_UN: lclTyp = TYP_BYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_I2_UN: lclTyp = TYP_SHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_I_UN: lclTyp = TYP_I_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_I4_UN: lclTyp = TYP_INT; goto CONV_OVF_UN; case CEE_CONV_OVF_I8_UN: lclTyp = TYP_LONG; goto CONV_OVF_UN; case CEE_CONV_OVF_U1_UN: lclTyp = TYP_UBYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_U2_UN: lclTyp = TYP_USHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_U_UN: lclTyp = TYP_U_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_U4_UN: lclTyp = TYP_UINT; goto CONV_OVF_UN; case CEE_CONV_OVF_U8_UN: lclTyp = TYP_ULONG; goto CONV_OVF_UN; CONV_OVF_UN: uns = true; goto CONV_OVF_COMMON; CONV_OVF: uns = false; goto CONV_OVF_COMMON; CONV_OVF_COMMON: ovfl = true; goto _CONV; case CEE_CONV_I1: lclTyp = TYP_BYTE; goto CONV; case CEE_CONV_I2: lclTyp = TYP_SHORT; goto CONV; case CEE_CONV_I: lclTyp = TYP_I_IMPL; goto CONV; case CEE_CONV_I4: lclTyp = TYP_INT; goto CONV; case CEE_CONV_I8: lclTyp = TYP_LONG; goto CONV; case CEE_CONV_U1: lclTyp = TYP_UBYTE; goto CONV; case CEE_CONV_U2: lclTyp = TYP_USHORT; goto CONV; #if (REGSIZE_BYTES == 8) case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV_UN; #else case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV; #endif case CEE_CONV_U4: lclTyp = TYP_UINT; goto CONV; case CEE_CONV_U8: lclTyp = TYP_ULONG; goto CONV_UN; case CEE_CONV_R4: lclTyp = TYP_FLOAT; goto CONV; case CEE_CONV_R8: lclTyp = TYP_DOUBLE; goto CONV; case CEE_CONV_R_UN: lclTyp = TYP_DOUBLE; goto CONV_UN; CONV_UN: uns = true; ovfl = false; goto _CONV; CONV: uns = false; ovfl = false; goto _CONV; _CONV: // only converts from FLOAT or DOUBLE to an integer type // and converts from ULONG (or LONG on ARM) to DOUBLE are morphed to calls if (varTypeIsFloating(lclTyp)) { callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl #ifdef TARGET_64BIT // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? // TYP_BYREF could be used as TYP_I_IMPL which is long. 
// TODO-CQ: remove this when we lower casts long/ulong --> float/double // and generate SSE2 code instead of going through helper calls. || (impStackTop().val->TypeGet() == TYP_BYREF) #endif ; } else { callNode = varTypeIsFloating(impStackTop().val->TypeGet()); } op1 = impPopStack().val; impBashVarAddrsToI(op1); // Casts from floating point types must not have GTF_UNSIGNED set. if (varTypeIsFloating(op1)) { uns = false; } // At this point uns, ovf, callNode are all set. if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND) { op2 = op1->AsOp()->gtOp2; if (op2->gtOper == GT_CNS_INT) { ssize_t ival = op2->AsIntCon()->gtIconVal; ssize_t mask, umask; switch (lclTyp) { case TYP_BYTE: case TYP_UBYTE: mask = 0x00FF; umask = 0x007F; break; case TYP_USHORT: case TYP_SHORT: mask = 0xFFFF; umask = 0x7FFF; break; default: assert(!"unexpected type"); return; } if (((ival & umask) == ival) || ((ival & mask) == ival && uns)) { /* Toss the cast, it's a waste of time */ impPushOnStack(op1, tiRetVal); break; } else if (ival == mask) { /* Toss the masking, it's a waste of time, since we sign-extend from the small value anyways */ op1 = op1->AsOp()->gtOp1; } } } /* The 'op2' sub-operand of a cast is the 'real' type number, since the result of a cast to one of the 'small' integer types is an integer. */ type = genActualType(lclTyp); // If this is a no-op cast, just use op1. if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp))) { // Nothing needs to change } // Work is evidently required, add cast node else { if (callNode) { op1 = gtNewCastNodeL(type, op1, uns, lclTyp); } else { op1 = gtNewCastNode(type, op1, uns, lclTyp); } if (ovfl) { op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT); } if (op1->gtGetOp1()->OperIsConst() && opts.OptimizationEnabled()) { // Try and fold the introduced cast op1 = gtFoldExprConst(op1); } } impPushOnStack(op1, tiRetVal); break; case CEE_NEG: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal); break; case CEE_POP: { /* Pull the top value from the stack */ StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; /* Get hold of the type of the value being duplicated */ lclTyp = genActualType(op1->gtType); /* Does the value have any side effects? */ if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode) { // Since we are throwing away the value, just normalize // it to its address. This is more efficient. if (varTypeIsStruct(op1)) { JITDUMP("\n ... CEE_POP struct ...\n"); DISPTREE(op1); #ifdef UNIX_AMD64_ABI // Non-calls, such as obj or ret_expr, have to go through this. // Calls with large struct return value have to go through this. // Helper calls with small struct return value also have to go // through this since they do not follow Unix calling convention. if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd, op1->AsCall()->GetUnmanagedCallConv()) || op1->AsCall()->gtCallType == CT_HELPER) #endif // UNIX_AMD64_ABI { // If the value being produced comes from loading // via an underlying address, just null check the address. if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ)) { gtChangeOperToNullCheck(op1, block); } else { op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false); } JITDUMP("\n ... optimized to ...\n"); DISPTREE(op1); } } // If op1 is non-overflow cast, throw it away since it is useless. 
// Another reason for throwing away the useless cast is in the context of // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)). // The cast gets added as part of importing GT_CALL, which gets in the way // of fgMorphCall() on the forms of tail call nodes that we assert. if ((op1->gtOper == GT_CAST) && !op1->gtOverflow()) { op1 = op1->AsOp()->gtOp1; } if (op1->gtOper != GT_CALL) { if ((op1->gtFlags & GTF_SIDE_EFFECT) != 0) { op1 = gtUnusedValNode(op1); } else { // Can't bash to NOP here because op1 can be referenced from `currentBlock->bbEntryState`, // if we ever need to reimport we need a valid LCL_VAR on it. op1 = gtNewNothingNode(); } } /* Append the value to the tree list */ goto SPILL_APPEND; } /* No side effects - just throw the <BEEP> thing away */ } break; case CEE_DUP: { StackEntry se = impPopStack(); GenTree* tree = se.val; tiRetVal = se.seTypeInfo; op1 = tree; // If the expression to dup is simple, just clone it. // Otherwise spill it to a temp, and reload the temp twice. bool cloneExpr = false; if (!opts.compDbgCode) { // Duplicate 0 and +0.0 if (op1->IsIntegralConst(0) || op1->IsFloatPositiveZero()) { cloneExpr = true; } // Duplicate locals and addresses of them else if (op1->IsLocal()) { cloneExpr = true; } else if (op1->TypeIs(TYP_BYREF) && op1->OperIs(GT_ADDR) && op1->gtGetOp1()->IsLocal() && (OPCODE)impGetNonPrefixOpcode(codeAddr + sz, codeEndp) != CEE_INITOBJ) { cloneExpr = true; } } else { // Always clone for debug mode cloneExpr = true; } if (!cloneExpr) { const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill")); impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types type = genActualType(lvaTable[tmpNum].TypeGet()); op1 = gtNewLclvNode(tmpNum, type); // Propagate type info to the temp from the stack and the original tree if (type == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", tmpNum); lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle()); } } op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("DUP instruction")); assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT)); impPushOnStack(op1, tiRetVal); impPushOnStack(op2, tiRetVal); } break; case CEE_STIND_I1: lclTyp = TYP_BYTE; goto STIND; case CEE_STIND_I2: lclTyp = TYP_SHORT; goto STIND; case CEE_STIND_I4: lclTyp = TYP_INT; goto STIND; case CEE_STIND_I8: lclTyp = TYP_LONG; goto STIND; case CEE_STIND_I: lclTyp = TYP_I_IMPL; goto STIND; case CEE_STIND_REF: lclTyp = TYP_REF; goto STIND; case CEE_STIND_R4: lclTyp = TYP_FLOAT; goto STIND; case CEE_STIND_R8: lclTyp = TYP_DOUBLE; goto STIND; STIND: op2 = impPopStack().val; // value to store op1 = impPopStack().val; // address to store to // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); impBashVarAddrsToI(op1, op2); op2 = impImplicitR4orR8Cast(op2, lclTyp); #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if 
(varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // TARGET_64BIT if (opcode == CEE_STIND_REF) { // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType)); lclTyp = genActualType(op2->TypeGet()); } // Check target type. #ifdef DEBUG if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF) { if (op2->gtType == TYP_BYREF) { assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL); } else if (lclTyp == TYP_BYREF) { assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType)); } } else { assertImp(genActualType(op2->gtType) == genActualType(lclTyp) || ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp))); } #endif op1 = gtNewOperNode(GT_IND, lclTyp, op1); // stind could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE; if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } op1 = gtNewAssignNode(op1, op2); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; // Spill side-effects AND global-data-accesses if (verCurrentState.esStackDepth > 0) { impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND")); } goto APPEND; case CEE_LDIND_I1: lclTyp = TYP_BYTE; goto LDIND; case CEE_LDIND_I2: lclTyp = TYP_SHORT; goto LDIND; case CEE_LDIND_U4: case CEE_LDIND_I4: lclTyp = TYP_INT; goto LDIND; case CEE_LDIND_I8: lclTyp = TYP_LONG; goto LDIND; case CEE_LDIND_REF: lclTyp = TYP_REF; goto LDIND; case CEE_LDIND_I: lclTyp = TYP_I_IMPL; goto LDIND; case CEE_LDIND_R4: lclTyp = TYP_FLOAT; goto LDIND; case CEE_LDIND_R8: lclTyp = TYP_DOUBLE; goto LDIND; case CEE_LDIND_U1: lclTyp = TYP_UBYTE; goto LDIND; case CEE_LDIND_U2: lclTyp = TYP_USHORT; goto LDIND; LDIND: op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); #ifdef TARGET_64BIT // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (genActualType(op1->gtType) == TYP_INT) { op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL); } #endif assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, lclTyp, op1); // ldind could point anywhere, example a boxed class static int op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; case CEE_UNALIGNED: assert(sz == 1); val = getU1LittleEndian(codeAddr); ++codeAddr; JITDUMP(" %u", val); if ((val != 1) && (val != 2) && (val != 4)) { BADCODE("Alignment unaligned. must be 1, 2, or 4"); } Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. 
prefixes"); prefixFlags |= PREFIX_UNALIGNED; impValidateMemoryAccessOpcode(codeAddr, codeEndp, false); PREFIX: opcode = (OPCODE)getU1LittleEndian(codeAddr); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; case CEE_VOLATILE: Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes"); prefixFlags |= PREFIX_VOLATILE; impValidateMemoryAccessOpcode(codeAddr, codeEndp, true); assert(sz == 0); goto PREFIX; case CEE_LDFTN: { // Need to do a lookup here so that we perform an access check // and do a NOWAY if protections are violated _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); DO_LDFTN: op1 = impMethodPointer(&resolvedToken, &callInfo); if (compDonotInline()) { return; } // Call info may have more precise information about the function than // the resolved token. CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(callInfo.hMethod != nullptr); heapToken->hMethod = callInfo.hMethod; impPushOnStack(op1, typeInfo(heapToken)); break; } case CEE_LDVIRTFTN: { /* Get the method token */ _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */, combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), CORINFO_CALLINFO_CALLVIRT), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } mflags = callInfo.methodFlags; impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); if (compIsForInlining()) { if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL); return; } } CORINFO_SIG_INFO& ftnSig = callInfo.sig; /* Get the object-ref */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); if (opts.IsReadyToRun()) { if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } } else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo); if (compDonotInline()) { return; } CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(heapToken->tokenType == CORINFO_TOKENKIND_Method); assert(callInfo.hMethod != nullptr); heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn; heapToken->hMethod = callInfo.hMethod; impPushOnStack(fptr, typeInfo(heapToken)); break; } case CEE_CONSTRAINED: assertImp(sz == sizeof(unsigned)); impResolveToken(codeAddr, &constrainedResolvedToken, 
CORINFO_TOKENKIND_Constrained); codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually JITDUMP(" (%08X) ", constrainedResolvedToken.token); Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes"); prefixFlags |= PREFIX_CONSTRAINED; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN) { BADCODE("constrained. has to be followed by callvirt, call or ldftn"); } } goto PREFIX; case CEE_READONLY: JITDUMP(" readonly."); Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes"); prefixFlags |= PREFIX_READONLY; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("readonly. has to be followed by ldelema or call"); } } assert(sz == 0); goto PREFIX; case CEE_TAILCALL: JITDUMP(" tail."); Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("tailcall. has to be followed by call, callvirt or calli"); } } assert(sz == 0); goto PREFIX; case CEE_NEWOBJ: /* Since we will implicitly insert newObjThisPtr at the start of the argument list, spill any GTF_ORDER_SIDEEFF */ impSpillSpecialSideEff(); /* NEWOBJ does not respond to TAIL */ prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT; /* NEWOBJ does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; _impResolveToken(CORINFO_TOKENKIND_NewObj); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM), &callInfo); mflags = callInfo.methodFlags; if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0) { BADCODE("newobj on static or abstract method"); } // Insert the security callout before any actual code is generated impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); // There are three different cases for new // Object size is variable (depends on arguments) // 1) Object is an array (arrays treated specially by the EE) // 2) Object is some other variable sized object (e.g. String) // 3) Class Size can be determined beforehand (normal case) // In the first case, we need to call a NEWOBJ helper (multinewarray) // in the second case we call the constructor with a '0' this pointer // In the third case we alloc the memory, then call the constuctor clsFlags = callInfo.classFlags; if (clsFlags & CORINFO_FLG_ARRAY) { // Arrays need to call the NEWOBJ helper. assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE); impImportNewObjArray(&resolvedToken, &callInfo); if (compDonotInline()) { return; } callTyp = TYP_REF; break; } // At present this can only be String else if (clsFlags & CORINFO_FLG_VAROBJSIZE) { // Skip this thisPtr argument newObjThisPtr = nullptr; /* Remember that this basic block contains 'new' of an object */ block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; } else { // This is the normal case where the size of the object is // fixed. Allocate the memory and call the constructor. 
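// Conceptually, for a fixed-size class the expansion below is (illustrative
// sketch only, not literal IR):
//     tmp = ALLOCOBJ(classHandle);   // or a zero-initialized local for a value class
//     CALL  .ctor(tmp or &tmp, args...);
//     push  tmp;
// where `tmp` is the temp grabbed just below and the constructor call itself
// is built by the shared CALL path.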
// Note: We cannot add a peep to avoid use of temp here // becase we don't have enough interference info to detect when // sources and destination interfere, example: s = new S(ref); // TODO: We find the correct place to introduce a general // reverse copy prop for struct return values from newobj or // any function returning structs. /* get a temporary for the new object */ lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp")); if (compDonotInline()) { // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS. assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS); return; } // In the value class case we only need clsHnd for size calcs. // // The lookup of the code pointer will be handled by CALL in this case if (clsFlags & CORINFO_FLG_VALUECLASS) { if (compIsForInlining()) { // If value class has GC fields, inform the inliner. It may choose to // bail out on the inline. DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; // some policies do not track the relative hotness of call sites for // "always" inline cases. if (impInlineInfo->iciBlock->isRunRarely()) { compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } } } } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { lvaTable[lclNum].lvType = JITtype2varType(jitTyp); } else { // The local variable itself is the allocated space. // Here we need unsafe value cls check, since the address of struct is taken for further use // and potentially exploitable. lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */); } bool bbInALoop = impBlockIsInALoop(block); bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) && (!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN)); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { // Append a tree to zero-out the temp newObjThisPtr = gtNewLclvNode(lclNum, lclDsc->TypeGet()); newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } else { JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", lclNum); lclDsc->lvSuppressedZeroInit = 1; compSuppressedZeroInit = true; } // Obtain the address of the temp newObjThisPtr = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet())); } else { // If we're newing up a finalizable object, spill anything that can cause exceptions. // bool hasSideEffects = false; CorInfoHelpFunc newHelper = info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd, &hasSideEffects); if (hasSideEffects) { JITDUMP("\nSpilling stack for finalizable newobj\n"); impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill")); } const bool useParent = true; op1 = gtNewAllocObjNode(&resolvedToken, useParent); if (op1 == nullptr) { return; } // Remember that this basic block contains 'new' of an object block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Append the assignment to the temp/local. 
Dont need to spill // at all as we are just calling an EE-Jit helper which can only // cause an (async) OutOfMemoryException. // We assign the newly allocated object (by a GT_ALLOCOBJ node) // to a temp. Note that the pattern "temp = allocObj" is required // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes // without exhaustive walk over all expressions. impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE); assert(lvaTable[lclNum].lvSingleDef == 0); lvaTable[lclNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", lclNum); lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */); newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF); } } goto CALL; case CEE_CALLI: /* CALLI does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; FALLTHROUGH; case CEE_CALLVIRT: case CEE_CALL: // We can't call getCallInfo on the token from a CALLI, but we need it in // many other places. We unfortunately embed that knowledge here. if (opcode != CEE_CALLI) { _impResolveToken(CORINFO_TOKENKIND_Method); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, // this is how impImportCall invokes getCallInfo combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT : CORINFO_CALLINFO_NONE), &callInfo); } else { // Suppress uninitialized use warning. memset(&resolvedToken, 0, sizeof(resolvedToken)); memset(&callInfo, 0, sizeof(callInfo)); resolvedToken.token = getU4LittleEndian(codeAddr); resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; } CALL: // memberRef should be set. // newObjThisPtr should be set for CEE_NEWOBJ JITDUMP(" %08X", resolvedToken.token); constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0; bool newBBcreatedForTailcallStress; bool passedStressModeValidation; newBBcreatedForTailcallStress = false; passedStressModeValidation = true; if (compIsForInlining()) { if (compDonotInline()) { return; } // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks. assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0); } else { if (compTailCallStress()) { // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()? // Tail call stress only recognizes call+ret patterns and forces them to be // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress // doesn't import 'ret' opcode following the call into the basic block containing // the call instead imports it to a new basic block. Note that fgMakeBasicBlocks() // is already checking that there is an opcode following call and hence it is // safe here to read next opcode without bounds check. newBBcreatedForTailcallStress = impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't // make it jump to RET. (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT); if (newBBcreatedForTailcallStress && !hasTailPrefix) { // Do a more detailed evaluation of legality const bool returnFalseIfInvalid = true; const bool passedConstraintCheck = verCheckTailCallConstraint(opcode, &resolvedToken, constraintCall ? 
&constrainedResolvedToken : nullptr, returnFalseIfInvalid); if (passedConstraintCheck) { // Now check with the runtime CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod; bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) || (callInfo.kind == CORINFO_VIRTUALCALL_VTABLE); CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd, hasTailPrefix)) // Is it legal to do tailcall? { // Stress the tailcall. JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; prefixFlags |= PREFIX_TAILCALL_STRESS; } else { // Runtime disallows this tail call JITDUMP(" (Tailcall stress: runtime preventing tailcall)"); passedStressModeValidation = false; } } else { // Constraints disallow this tail call JITDUMP(" (Tailcall stress: constraint check failed)"); passedStressModeValidation = false; } } } } // This is split up to avoid goto flow warnings. bool isRecursive; isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd); // If we've already disqualified this call as a tail call under tail call stress, // don't consider it for implicit tail calling either. // // When not running under tail call stress, we may mark this call as an implicit // tail call candidate. We'll do an "equivalent" validation during impImportCall. // // Note that when running under tail call stress, a call marked as explicit // tail prefixed will not be considered for implicit tail calling. if (passedStressModeValidation && impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive)) { if (compIsForInlining()) { #if FEATURE_TAILCALL_OPT_SHARED_RETURN // Are we inlining at an implicit tail call site? If so the we can flag // implicit tail call sites in the inline body. These call sites // often end up in non BBJ_RETURN blocks, so only flag them when // we're able to handle shared returns. if (impInlineInfo->iciCall->IsImplicitTailCall()) { JITDUMP("\n (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN } else { JITDUMP("\n (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } } // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call). explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0; readonlyCall = (prefixFlags & PREFIX_READONLY) != 0; if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ) { // All calls and delegates need a security callout. // For delegates, this is the call to the delegate constructor, not the access check on the // LD(virt)FTN. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); } callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr, newObjThisPtr, prefixFlags, &callInfo, opcodeOffs); if (compDonotInline()) { // We do not check fails after lvaGrabTemp. It is covered with CoreCLR_13272 issue. assert((callTyp == TYP_UNDEF) || (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS)); return; } if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we // have created a new BB after the "call" // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless. 
{ assert(!compIsForInlining()); goto RET; } break; case CEE_LDFLD: case CEE_LDSFLD: case CEE_LDFLDA: case CEE_LDSFLDA: { bool isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA); bool isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA); /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; CORINFO_CLASS_HANDLE objType = nullptr; // used for fields if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA) { tiObj = &impStackTop().seTypeInfo; StackEntry se = impPopStack(); objType = se.seTypeInfo.GetClassHandle(); obj = se.val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; clsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER); return; default: break; } if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT && clsHnd) { if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) && !(info.compFlags & CORINFO_FLG_FORCEINLINE)) { // Loading a static valuetype field usually will cause a JitHelper to be called // for the static base. This will bloat the code. compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS); if (compInlineResult->IsFailure()) { return; } } } } tiRetVal = verMakeTypeInfo(ciType, clsHnd); if (isLoadAddress) { tiRetVal.MakeByRef(); } else { tiRetVal.NormaliseForStack(); } // Perform this check always to ensure that we get field access exceptions even with // SkipVerification. impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static load accesses non-static field if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj. if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } bool usesHelper = false; switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { // If the object is a struct, what we really want is // for the field to operate on the address of the struct. 
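// Illustrative example (hypothetical IL, names made up): for
//     ldloc.0                  // a struct S on the stack
//     ldfld int32 S::x
// the struct value is first turned into its address below, so the FIELD node
// reads through ADDR(struct) rather than from the struct value itself.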
if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj)) { assert(opcode == CEE_LDFLD && objType != nullptr); obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true); } /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If the object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } // wrap it in a address of operator if necessary if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1); } else { if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1); } break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, nullptr); usesHelper = true; break; case CORINFO_FIELD_STATIC_ADDRESS: // Replace static read-only fields with constant if possible if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) && !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) && (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp))) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd, impTokenLookupContextHandle); if (initClassResult & CORINFO_INITCLASS_INITIALIZED) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr); // We should always be able to access this static's address directly // assert(pFldAddr == nullptr); op1 = impImportStaticReadOnlyField(fldAddr, lclTyp); // Widen small types since we're propagating the value // instead of producing an indir. 
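// Hypothetical example for illustration: with an already-initialized class and
//     static readonly int Limit = 42;
// an `ldsfld` of Limit is imported here as CNS_INT(42) instead of an
// indirection off the static base ("Limit" and its value are made up).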
// op1->gtType = genActualType(lclTyp); goto FIELD_DONE; } } FALLTHROUGH; case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; case CORINFO_FIELD_INTRINSIC_ZERO: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); op1 = gtNewIconNode(0, lclTyp); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_EMPTY_STRING: { assert(aflags & CORINFO_ACCESS_GET); // Import String.Empty as "" (GT_CNS_STR with a fake SconCPX = 0) op1 = gtNewSconNode(EMPTY_STRING_SCON, nullptr); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); #if BIGENDIAN op1 = gtNewIconNode(0, lclTyp); #else op1 = gtNewIconNode(1, lclTyp); #endif goto FIELD_DONE; } break; default: assert(!"Unexpected fieldAccessor"); } if (!isLoadAddress) { if (prefixFlags & PREFIX_VOLATILE) { op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_VOLATILE; } } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_UNALIGNED; } } } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } FIELD_DONE: impPushOnStack(op1, tiRetVal); } break; case CEE_STFLD: case CEE_STSFLD: { bool isStoreStatic = (opcode == CEE_STSFLD); CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type) /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = CORINFO_ACCESS_SET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; typeInfo tiVal; /* Pull the value from the stack */ StackEntry se = impPopStack(); op2 = se.val; tiVal = se.seTypeInfo; clsHnd = tiVal.GetClassHandle(); if (opcode == CEE_STFLD) { tiObj = &impStackTop().seTypeInfo; obj = impPopStack().val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; fieldClsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { /* Is this a 'special' (COM) field? or a TLS ref static field?, field stored int GC heap? or * per-inst static? 
*/ switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER); return; default: break; } } impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static store accesses non-static field if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using stfld on a static field. // We allow it, but need to eval any side-effects for obj if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, op2); goto SPILL_APPEND; case CORINFO_FIELD_STATIC_ADDRESS: case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; default: assert(!"Unexpected fieldAccessor"); } // Create the member assignment, unless we have a TYP_STRUCT. 
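// (Sketch of the two paths, added for clarity.) Non-struct stores become a
// simple ASG(FIELD(obj, fld), value) right here; TYP_STRUCT stores are
// deferred so impAssignStruct can build the block/copy form after the
// interference spills below have been appended.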
bool deferStructAssign = (lclTyp == TYP_STRUCT); if (!deferStructAssign) { if (prefixFlags & PREFIX_VOLATILE) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_IND_UNALIGNED; } /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during importation and reads from the union as if it were a long during code generation. Though this can potentially read garbage, one can get lucky to have this working correctly. This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a dependency on it. To be backward compatible, we will explicitly add an upward cast here so that it works correctly always. Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT for V4.0. */ CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be // generated for ARM as well as x86, so the following IR will be accepted: // STMTx (IL 0x... ???) // * ASG long // +--* CLS_VAR long // \--* CNS_INT int 2 if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) && varTypeIsLong(op1->TypeGet())) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } #endif #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op1' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } op1 = gtNewAssignNode(op1, op2); /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } /* stfld can interfere with value classes (consider the sequence ldloc, ldloca, ..., stfld, stloc). We will be conservative and spill all value class references from the stack. */ if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL))) { assert(tiObj); // If we can resolve the field to be within some local, // then just spill that local. 
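// Otherwise fall back to wider spills: the whole evaluation stack when the
// object is a by-value struct, or just the value-class references on the
// stack in the remaining cases (clarifying note describing the branches below).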
// GenTreeLclVarCommon* const lcl = obj->IsLocalAddrExpr(); if (lcl != nullptr) { impSpillLclRefs(lcl->GetLclNum()); } else if (impIsValueType(tiObj)) { impSpillEvalStack(); } else { impSpillValueClasses(); } } /* Spill any refs to the same member from the stack */ impSpillLclRefs((ssize_t)resolvedToken.hField); /* stsfld also interferes with indirect accesses (for aliased statics) and calls. But don't need to spill other statics as we have explicitly spilled this particular static field. */ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD")); if (deferStructAssign) { op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL); } } goto APPEND; case CEE_NEWARR: { /* Get the class type index operand */ _impResolveToken(CORINFO_TOKENKIND_Newarr); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } tiRetVal = verMakeTypeInfo(resolvedToken.hClass); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); /* Form the arglist: array class handle, size */ op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); #ifdef TARGET_64BIT // The array helper takes a native int for array length. // So if we have an int, explicitly extend it to be a native int. if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { if (op2->IsIntegralConst()) { op2->gtType = TYP_I_IMPL; } else { bool isUnsigned = false; op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL); } } #endif // TARGET_64BIT #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF, gtNewCallArgs(op2)); usingReadyToRunHelper = (op1 != nullptr); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the newarr call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub // 3) Allocate the new array // Reason: performance (today, we'll always use the slow helper for the R2R generics case) // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { GenTreeCall::Use* args = gtNewCallArgs(op1, op2); /* Create a call to 'new' */ // Note that this only works for shared generic code because the same helper is used for all // reference array types op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args); } op1->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass; /* Remember that this basic block contains 'new' of an sd array */ block->bbFlags |= BBF_HAS_NEWARRAY; optMethodFlags |= OMF_HAS_NEWARRAY; /* Push the result of the call on the stack */ impPushOnStack(op1, tiRetVal); callTyp = TYP_REF; } break; case CEE_LOCALLOC: // We don't allow locallocs inside handlers if (block->hasHndIndex()) { BADCODE("Localloc can't be inside handler"); } // Get the size to allocate op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); 
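// Illustrative C# shape (hypothetical, for clarity): a small constant-size
//     byte* p = stackalloc byte[32];
// outside of a loop is converted below into the address of a fresh TYP_BLK
// local (part of the fixed frame), while zero sizes become a null pointer and
// everything else falls back to a real GT_LCLHEAP.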
if (verCurrentState.esStackDepth != 0) { BADCODE("Localloc can only be used when the stack is empty"); } // If the localloc is not in a loop and its size is a small constant, // create a new local var of TYP_BLK and return its address. { bool convertedToLocal = false; // Need to aggressively fold here, as even fixed-size locallocs // will have casts in the way. op2 = gtFoldExpr(op2); if (op2->IsIntegralConst()) { const ssize_t allocSize = op2->AsIntCon()->IconValue(); bool bbInALoop = impBlockIsInALoop(block); if (allocSize == 0) { // Result is nullptr JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n"); op1 = gtNewIconNode(0, TYP_I_IMPL); convertedToLocal = true; } else if ((allocSize > 0) && !bbInALoop) { // Get the size threshold for local conversion ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE; #ifdef DEBUG // Optionally allow this to be modified maxSize = JitConfig.JitStackAllocToLocalSize(); #endif // DEBUG if (allocSize <= maxSize) { const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal")); JITDUMP("Converting stackalloc of %zd bytes to new local V%02u\n", allocSize, stackallocAsLocal); lvaTable[stackallocAsLocal].lvType = TYP_BLK; lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize; lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true; op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK); op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1); convertedToLocal = true; if (!this->opts.compDbgEnC) { // Ensure we have stack security for this method. // Reorder layout since the converted localloc is treated as an unsafe buffer. setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; } } } } if (!convertedToLocal) { // Bail out if inlining and the localloc was not converted. // // Note we might consider allowing the inline, if the call // site is not in a loop. if (compIsForInlining()) { InlineObservation obs = op2->IsIntegralConst() ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN; compInlineResult->NoteFatal(obs); return; } op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2); // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd. op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE); // Ensure we have stack security for this method. setNeedsGSSecurityCookie(); /* The FP register may not be back to the original value at the end of the method, even if the frame size is 0, as localloc may have modified it. So we will HAVE to reset it */ compLocallocUsed = true; } else { compLocallocOptimized = true; } } impPushOnStack(op1, tiRetVal); break; case CEE_ISINST: { /* Get the type token */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? 
opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the isinstanceof_any call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Perform the 'is instance' check on the input object // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false, opcodeOffs); } if (compDonotInline()) { return; } impPushOnStack(op1, tiRetVal); } break; } case CEE_REFANYVAL: // get the class handle and make a ICON node out of it _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); // Call helper GETREFANY(classHandle, op1); op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, gtNewCallArgs(op2, op1)); impPushOnStack(op1, tiRetVal); break; case CEE_REFANYTYPE: op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); if (op1->gtOper == GT_OBJ) { // Get the address of the refany op1 = op1->AsOp()->gtOp1; // Fetch the type from the correct slot op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL)); op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1); } else { assertImp(op1->gtOper == GT_MKREFANY); // The pointer may have side-effects if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT) { impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); #ifdef DEBUG impNoteLastILoffs(); #endif } // We already have the class handle op1 = op1->AsOp()->gtOp2; } // convert native TypeHandle to RuntimeTypeHandle { GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT, helperArgs); CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass(); // The handle struct is returned in register op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); op1->AsCall()->gtRetClsHnd = classHandle; #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, classHandle, op1->AsCall()->GetUnmanagedCallConv()); #endif tiRetVal = typeInfo(TI_STRUCT, classHandle); } impPushOnStack(op1, tiRetVal); break; case CEE_LDTOKEN: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); lastLoadToken = codeAddr; _impResolveToken(CORINFO_TOKENKIND_Ldtoken); tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken); op1 = impTokenToHandle(&resolvedToken, nullptr, true); if (op1 == nullptr) { // compDonotInline() return; } helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE; assert(resolvedToken.hClass != nullptr); if (resolvedToken.hMethod != nullptr) { helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD; } else if (resolvedToken.hField != nullptr) { helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD; } GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs); // The handle struct is returned in register and // it could be 
consumed both as `TYP_STRUCT` and `TYP_REF`. op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, tokenType, op1->AsCall()->GetUnmanagedCallConv()); #endif op1->AsCall()->gtRetClsHnd = tokenType; tiRetVal = verMakeTypeInfo(tokenType); impPushOnStack(op1, tiRetVal); } break; case CEE_UNBOX: case CEE_UNBOX_ANY: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); bool runtimeLookup; op2 = impTokenToHandle(&resolvedToken, &runtimeLookup); if (op2 == nullptr) { assert(compDonotInline()); return; } // Run this always so we can get access exceptions even with SkipVerification. accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n"); op1 = impPopStack().val; goto CASTCLASS; } /* Pop the object and create the unbox helper call */ /* You might think that for UNBOX_ANY we need to push a different */ /* (non-byref) type, but here we're making the tiRetVal that is used */ /* for the intermediate pointer which we then transfer onto the OBJ */ /* instruction. OBJ then creates the appropriate tiRetVal. */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass); assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE); // Check legality and profitability of inline expansion for unboxing. const bool canExpandInline = (helper == CORINFO_HELP_UNBOX); const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled(); if (canExpandInline && shouldExpandInline) { // See if we know anything about the type of op1, the object being unboxed. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(op1, &isExact, &isNonNull); // We can skip the "exact" bit here as we are comparing to a value class. // compareTypesForEquality should bail on comparisions for shared value classes. if (clsHnd != NO_CLASS_HANDLE) { const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(resolvedToken.hClass, clsHnd); if (compare == TypeCompareState::Must) { JITDUMP("\nOptimizing %s (%s) -- type test will succeed\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", eeGetClassName(clsHnd)); // For UNBOX, null check (if necessary), and then leave the box payload byref on the stack. 
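// Rough shape of what gets pushed below (illustrative only):
//     COMMA(NULLCHECK(box), ADD(box, TARGET_POINTER_SIZE))
// i.e. a byref to the box payload just past the method table pointer.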
if (opcode == CEE_UNBOX) { GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("optimized unbox clone")); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, boxPayloadOffset); GenTree* nullcheck = gtNewNullCheck(op1, block); GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress); impPushOnStack(result, tiRetVal); break; } // For UNBOX.ANY load the struct from the box payload byref (the load will nullcheck) assert(opcode == CEE_UNBOX_ANY); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, op1, boxPayloadOffset); impPushOnStack(boxPayloadAddress, tiRetVal); oper = GT_OBJ; goto OBJ; } else { JITDUMP("\nUnable to optimize %s -- can't resolve type comparison\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); } } else { JITDUMP("\nUnable to optimize %s -- class for [%06u] not known\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", dspTreeID(op1)); } JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); // we are doing normal unboxing // inline the common case of the unbox helper // UNBOX(exp) morphs into // clone = pop(exp); // ((*clone == typeToken) ? nop : helper(clone, typeToken)); // push(clone + TARGET_POINTER_SIZE) // GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone1")); op1 = gtNewMethodTableLookup(op1); GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2); op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone2")); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = gtNewHelperCallNode(helper, TYP_VOID, gtNewCallArgs(op2, op1)); op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1); op1 = gtNewQmarkNode(TYP_VOID, condBox, op1->AsColon()); // QMARK nodes cannot reside on the evaluation stack. Because there // may be other trees on the evaluation stack that side-effect the // sources of the UNBOX operation we must spill the stack. impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Create the address-expression to reference past the object header // to the beginning of the value-type. Today this means adjusting // past the base of the objects vtable field which is pointer sized. op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2); } else { JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal"); // Don't optimize, just call the helper and be done with it op1 = gtNewHelperCallNode(helper, (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), gtNewCallArgs(op2, op1)); if (op1->gtType == TYP_STRUCT) { op1->AsCall()->gtRetClsHnd = resolvedToken.hClass; } } assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref. (helper == CORINFO_HELP_UNBOX_NULLABLE && varTypeIsStruct(op1)) // UnboxNullable helper returns a struct. 
); /* ---------------------------------------------------------------------- | \ helper | | | | \ | | | | \ | CORINFO_HELP_UNBOX | CORINFO_HELP_UNBOX_NULLABLE | | \ | (which returns a BYREF) | (which returns a STRUCT) | | | opcode \ | | | |--------------------------------------------------------------------- | UNBOX | push the BYREF | spill the STRUCT to a local, | | | | push the BYREF to this local | |--------------------------------------------------------------------- | UNBOX_ANY | push a GT_OBJ of | push the STRUCT | | | the BYREF | For Linux when the | | | | struct is returned in two | | | | registers create a temp | | | | which address is passed to | | | | the unbox_nullable helper. | |--------------------------------------------------------------------- */ if (opcode == CEE_UNBOX) { if (helper == CORINFO_HELP_UNBOX_NULLABLE) { // Unbox nullable helper returns a struct type. // We need to spill it to a temp so than can take the address of it. // Here we need unsafe value cls check, since the address of struct is taken to be used // further along and potetially be exploitable. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable")); lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); } assert(op1->gtType == TYP_BYREF); } else { assert(opcode == CEE_UNBOX_ANY); if (helper == CORINFO_HELP_UNBOX) { // Normal unbox helper returns a TYP_BYREF. impPushOnStack(op1, tiRetVal); oper = GT_OBJ; goto OBJ; } assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!"); #if FEATURE_MULTIREG_RET if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass, CorInfoCallConvExtension::Managed)) { // Unbox nullable helper returns a TYP_STRUCT. // For the multi-reg case we need to spill it to a temp so that // we can pass the address to the unbox_nullable jit helper. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable")); lvaTable[tmp].lvIsMultiRegArg = true; lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); // In this case the return value of the unbox helper is TYP_BYREF. // Make sure the right type is placed on the operand type stack. impPushOnStack(op1, tiRetVal); // Load the struct. oper = GT_OBJ; assert(op1->gtType == TYP_BYREF); goto OBJ; } else #endif // !FEATURE_MULTIREG_RET { // If non register passable struct we have it materialized in the RetBuf. 
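// Clarifying note: in this remaining path the CORINFO_HELP_UNBOX_NULLABLE call
// already delivers the resulting struct via its return buffer, so the
// TYP_STRUCT call node is pushed directly with no extra temp.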
assert(op1->gtType == TYP_STRUCT); tiRetVal = verMakeTypeInfo(resolvedToken.hClass); assert(tiRetVal.IsValueClass()); } } impPushOnStack(op1, tiRetVal); } break; case CEE_BOX: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Box); JITDUMP(" %08X", resolvedToken.token); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); // Note BOX can be used on things that are not value classes, in which // case we get a NOP. However the verifier's view of the type on the // stack changes (in generic code a 'T' becomes a 'boxed T') if (!eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing BOX(refClass) as NOP\n"); verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal; break; } // Look ahead for box idioms int matched = impBoxPatternMatch(&resolvedToken, codeAddr + sz, codeEndp); if (matched >= 0) { // Skip the matched IL instructions sz += matched; break; } impImportAndPushBox(&resolvedToken); if (compDonotInline()) { return; } } break; case CEE_SIZEOF: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass)); impPushOnStack(op1, tiRetVal); break; case CEE_CASTCLASS: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; /* Pop the address and create the 'checked cast' helper call */ // At this point we expect typeRef to contain the token, op1 to contain the value being cast, // and op2 to contain code that creates the type handle corresponding to typeRef CASTCLASS: { GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the chkcastany call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Check the object on the stack for the type-cast // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true, opcodeOffs); } if (compDonotInline()) { return; } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); } } break; case CEE_THROW: // Any block with a throw is rarely executed. 
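// (Descriptive note.) `throw` becomes a call to the CORINFO_HELP_THROW helper
// taking the popped exception object; anything left on the evaluation stack is
// evaluated only for side effects and discarded at EVAL_APPEND.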
block->bbSetRunRarely(); // Pop the exception object and create the 'throw' helper call op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewCallArgs(impPopStack().val)); // Fall through to clear out the eval stack. EVAL_APPEND: if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); goto APPEND; case CEE_RETHROW: assert(!compIsForInlining()); if (info.compXcptnsCount == 0) { BADCODE("rethrow outside catch"); } /* Create the 'rethrow' helper call */ op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID); goto EVAL_APPEND; case CEE_INITOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = gtNewIconNode(0); // Value op1 = impPopStack().val; // Dest if (eeIsValueClass(resolvedToken.hClass)) { op1 = gtNewStructVal(resolvedToken.hClass, op1); if (op1->OperIs(GT_OBJ)) { gtSetObjGcInfo(op1->AsObj()); } } else { size = info.compCompHnd->getClassSize(resolvedToken.hClass); assert(size == TARGET_POINTER_SIZE); op1 = gtNewBlockVal(op1, size); } op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); goto SPILL_APPEND; case CEE_INITBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Value op1 = impPopStack().val; // Dst addr if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); } else { if (!op2->IsIntegralConst(0)) { op2 = gtNewOperNode(GT_INIT_VAL, TYP_INT, op2); } op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Src addr op1 = impPopStack().val; // Dst addr if (op2->OperGet() == GT_ADDR) { op2 = op2->AsOp()->gtOp1; } else { op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2); } if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, true); } else { op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (!eeIsValueClass(resolvedToken.hClass)) { op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; impPushOnStack(op1, typeInfo()); opcode = CEE_STIND_REF; lclTyp = TYP_REF; goto STIND; } op2 = impPopStack().val; // Src op1 = impPopStack().val; // Dest op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0)); goto SPILL_APPEND; case CEE_STOBJ: { assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; } if (lclTyp == TYP_REF) { opcode = CEE_STIND_REF; goto STIND; } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if 
(impIsPrimitive(jitTyp)) { lclTyp = JITtype2varType(jitTyp); goto STIND; } op2 = impPopStack().val; // Value op1 = impPopStack().val; // Ptr assertImp(varTypeIsStruct(op2)); op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED)) { op1->gtFlags |= GTF_BLK_UNALIGNED; } goto SPILL_APPEND; } case CEE_MKREFANY: assert(!compIsForInlining()); // Being lazy here. Refanys are tricky in terms of gc tracking. // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany. JITDUMP("disabling struct promotion because of mkrefany\n"); fgNoStructPromotion = true; oper = GT_MKREFANY; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken, nullptr, true); if (op2 == nullptr) { // compDonotInline() return; } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec. // But JIT32 allowed it, so we continue to allow it. assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT); // MKREFANY returns a struct. op2 is the class token. op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2); impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass())); break; case CEE_LDOBJ: { oper = GT_OBJ; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); OBJ: tiRetVal = verMakeTypeInfo(resolvedToken.hClass); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; opcode = CEE_LDIND_REF; goto LDIND; } op1 = impPopStack().val; assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL); CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1); // Could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF; assertImp(varTypeIsArithmetic(op1->gtType)); } else { // OBJ returns a struct // and an inline argument which is the class token of the loaded obj op1 = gtNewObjNode(resolvedToken.hClass, op1); } op1->gtFlags |= GTF_EXCEPT; if (prefixFlags & PREFIX_UNALIGNED) { op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; } case CEE_LDLEN: op1 = impPopStack().val; if (opts.OptimizationEnabled()) { /* Use GT_ARR_LENGTH operator so rng check opts see this */ GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length, block); op1 = arrLen; } else { /* Create the expression "*(array_addr + ArrLenOffs)" */ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL)); op1 = gtNewIndir(TYP_INT, op1); } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); break; case CEE_BREAK: op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID); goto SPILL_APPEND; case CEE_NOP: if (opts.compDbgCode) { op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); goto SPILL_APPEND; } break; /******************************** NYI *******************************/ case 0xCC: OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n"); FALLTHROUGH; case CEE_ILLEGAL: case CEE_MACRO_END: default: if (compIsForInlining()) 
{ compInlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); return; } BADCODE3("unknown opcode", ": %02X", (int)opcode); } codeAddr += sz; prevOpcode = opcode; prefixFlags = 0; } return; #undef _impResolveToken } #ifdef _PREFAST_ #pragma warning(pop) #endif // Push a local/argument treeon the operand stack void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal) { tiRetVal.NormaliseForStack(); if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr()) { tiRetVal.SetUninitialisedObjRef(); } impPushOnStack(op, tiRetVal); } //------------------------------------------------------------------------ // impCreateLocal: create a GT_LCL_VAR node to access a local that might need to be normalized on load // // Arguments: // lclNum -- The index into lvaTable // offset -- The offset to associate with the node // // Returns: // The node // GenTreeLclVar* Compiler::impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)) { var_types lclTyp; if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } return gtNewLclvNode(lclNum, lclTyp DEBUGARG(offset)); } // Load a local/argument on the operand stack // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal) { impPushVar(impCreateLocalNode(lclNum DEBUGARG(offset)), tiRetVal); } // Load an argument on the operand stack // Shared by the various CEE_LDARG opcodes // ilArgNum is the argument index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset) { Verify(ilArgNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { if (ilArgNum >= info.compArgsCount) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER); return; } impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo), impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo); } else { if (ilArgNum >= info.compArgsCount) { BADCODE("Bad IL"); } unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } impLoadVar(lclNum, offset); } } // Load a local on the operand stack // Shared by the various CEE_LDLOC opcodes // ilLclNum is the local index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset) { if (compIsForInlining()) { if (ilLclNum >= info.compMethodInfo->locals.numArgs) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER); return; } // Get the local type var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo; typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo; /* Have we allocated a temp for this local? 
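       (impInlineFetchLocal below returns the lvaTable temp to use for this inlinee local;
       judging by its debug name, it allocates that temp on first use.)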
*/ unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp")); // All vars of inlined methods should be !lvNormalizeOnLoad() assert(!lvaTable[lclNum].lvNormalizeOnLoad()); lclTyp = genActualType(lclTyp); impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal); } else { if (ilLclNum >= info.compMethodInfo->locals.numArgs) { BADCODE("Bad IL"); } unsigned lclNum = info.compArgsCount + ilLclNum; impLoadVar(lclNum, offset); } } #ifdef TARGET_ARM /************************************************************************************** * * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the * dst struct, because struct promotion will turn it into a float/double variable while * the rhs will be an int/long variable. We don't code generate assignment of int into * a float, but there is nothing that might prevent us from doing so. The tree however * would like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int)) * * tmpNum - the lcl dst variable num that is a struct. * src - the src tree assigned to the dest that is a struct/int (when varargs call.) * hClass - the type handle for the struct variable. * * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play, * however, we could do a codegen of transferring from int to float registers * (transfer, not a cast.) * */ void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass) { if (src->gtOper == GT_CALL && src->AsCall()->IsVarargs() && IsHfa(hClass)) { int hfaSlots = GetHfaCount(hClass); var_types hfaType = GetHfaType(hClass); // If we have varargs we morph the method's return type to be "int" irrespective of its original // type: struct/float at importer because the ABI calls out return in integer registers. // We don't want struct promotion to replace an expression like this: // lclFld_int = callvar_int() into lclFld_float = callvar_int(); // This means an int is getting assigned to a float without a cast. Prevent the promotion. if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) || (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES)) { // Make sure this struct type stays as struct so we can receive the call in a struct. lvaTable[tmpNum].lvIsMultiRegRet = true; } } } #endif // TARGET_ARM #if FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impAssignMultiRegTypeToVar: ensure calls that return structs in multiple // registers return values to suitable temps. // // Arguments: // op -- call returning a struct in registers // hClass -- class handle for struct // // Returns: // Tree with reference to struct local to use as call return value. GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return")); impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL); GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType); // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. ret->gtFlags |= GTF_DONT_CSE; assert(IsMultiRegReturnedType(hClass, callConv)); // Mark the var so that fields are not promoted and stay together. 
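    // (This mirrors impMarkLclDstNotPromotable above, which sets the same lvIsMultiRegRet
    // flag for the ARM varargs + HFA case.)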
lvaTable[tmpNum].lvIsMultiRegRet = true; return ret; } #endif // FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impReturnInstruction: import a return or an explicit tail call // // Arguments: // prefixFlags -- active IL prefixes // opcode -- [in, out] IL opcode // // Returns: // True if import was successful (may fail for some inlinees) // bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) { const bool isTailCall = (prefixFlags & PREFIX_TAILCALL) != 0; #ifdef DEBUG // If we are importing an inlinee and have GC ref locals we always // need to have a spill temp for the return value. This temp // should have been set up in advance, over in fgFindBasicBlocks. if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID)) { assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM); } #endif // DEBUG GenTree* op2 = nullptr; GenTree* op1 = nullptr; CORINFO_CLASS_HANDLE retClsHnd = nullptr; if (info.compRetType != TYP_VOID) { StackEntry se = impPopStack(); retClsHnd = se.seTypeInfo.GetClassHandle(); op2 = se.val; if (!compIsForInlining()) { impBashVarAddrsToI(op2); op2 = impImplicitIorI4Cast(op2, info.compRetType); op2 = impImplicitR4orR8Cast(op2, info.compRetType); // Note that we allow TYP_I_IMPL<->TYP_BYREF transformation, but only TYP_I_IMPL<-TYP_REF. assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) || ((op2->TypeGet() == TYP_I_IMPL) && TypeIs(info.compRetType, TYP_BYREF)) || (op2->TypeIs(TYP_BYREF, TYP_REF) && (info.compRetType == TYP_I_IMPL)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) || (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType))); #ifdef DEBUG if (!isTailCall && opts.compGcChecks && (info.compRetType == TYP_REF)) { // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with // one-return BB. assert(op2->gtType == TYP_REF); // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTreeCall::Use* args = gtNewCallArgs(op2); op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op2); } } #endif } else { if (verCurrentState.esStackDepth != 0) { assert(compIsForInlining()); JITDUMP("CALLSITE_COMPILATION_ERROR: inlinee's stack is not empty."); compInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); return false; } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (before normalization) =>\n"); gtDispTree(op2); } #endif // Make sure the type matches the original call. var_types returnType = genActualType(op2->gtType); var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType; if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT)) { originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass); } if (returnType != originalCallType) { // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa. // Allow TYP_REF to be returned as TYP_I_IMPL and NOT vice verse. 
if ((TypeIs(returnType, TYP_BYREF, TYP_REF) && (originalCallType == TYP_I_IMPL)) || ((returnType == TYP_I_IMPL) && TypeIs(originalCallType, TYP_BYREF))) { JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); } else { JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH); return false; } } // Below, we are going to set impInlineInfo->retExpr to the tree with the return // expression. At this point, retExpr could already be set if there are multiple // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of // the other blocks already set it. If there is only a single return block, // retExpr shouldn't be set. However, this is not true if we reimport a block // with a return. In that case, retExpr will be set, then the block will be // reimported, but retExpr won't get cleared as part of setting the block to // be reimported. The reimported retExpr value should be the same, so even if // we don't unconditionally overwrite it, it shouldn't matter. if (info.compRetNativeType != TYP_STRUCT) { // compRetNativeType is not TYP_STRUCT. // This implies it could be either a scalar type or SIMD vector type or // a struct type that can be normalized to a scalar type. if (varTypeIsStruct(info.compRetType)) { noway_assert(info.compRetBuffArg == BAD_VAR_NUM); // adjust the type away from struct to integral // and no normalizing op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); } else { // Do we have to normalize? var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType); if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) && fgCastNeeded(op2, fncRealRetType)) { // Small-typed return values are normalized by the callee op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType); } } if (fgNeedReturnSpillTemp()) { assert(info.compRetNativeType != TYP_VOID && (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals())); // If this method returns a ref type, track the actual types seen // in the returns. if (info.compRetType == TYP_REF) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull); if (impInlineInfo->retExpr == nullptr) { // This is the first return, so best known type is the type // of this return value. impInlineInfo->retExprClassHnd = returnClsHnd; impInlineInfo->retExprClassHndIsExact = isExact; } else if (impInlineInfo->retExprClassHnd != returnClsHnd) { // This return site type differs from earlier seen sites, // so reset the info and we'll fall back to using the method's // declared return type for the return spill temp. impInlineInfo->retExprClassHnd = nullptr; impInlineInfo->retExprClassHndIsExact = false; } } impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType; GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType); op2 = tmpOp2; #ifdef DEBUG if (impInlineInfo->retExpr) { // Some other block(s) have seen the CEE_RET first. // Better they spilled to the same temp. 
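                    // The asserts below check exactly that: every return site must have spilled
                    // into lvaInlineeReturnSpillTemp, so retExpr is a GT_LCL_VAR of that same temp.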
assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR); assert(impInlineInfo->retExpr->AsLclVarCommon()->GetLclNum() == op2->AsLclVarCommon()->GetLclNum()); } #endif } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (after normalization) =>\n"); gtDispTree(op2); } #endif // Report the return expression impInlineInfo->retExpr = op2; } else { // compRetNativeType is TYP_STRUCT. // This implies that struct return via RetBuf arg or multi-reg struct return GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall(); // Assign the inlinee return into a spill temp. // spill temp only exists if there are multiple return points if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM) { // in this case we have to insert multiple struct copies to the temp // and the retexpr is just the temp. assert(info.compRetNativeType != TYP_VOID); assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()); impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); } #if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) #if defined(TARGET_ARM) // TODO-ARM64-NYI: HFA // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the // next ifdefs could be refactored in a single method with the ifdef inside. if (IsHfa(retClsHnd)) { // Same as !IsHfa but just don't bother with impAssignStructPtr. #else // defined(UNIX_AMD64_ABI) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { // If single eightbyte, the return type would have been normalized and there won't be a temp var. // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes - // max allowed.) assert(retRegCount == MAX_RET_REG_COUNT); // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr. CLANG_FORMAT_COMMENT_ANCHOR; #endif // defined(UNIX_AMD64_ABI) if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { #if defined(TARGET_ARM) impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType); #else // defined(UNIX_AMD64_ABI) // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); #endif // defined(UNIX_AMD64_ABI) } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_ARM64) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount >= 2); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_X86) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount == MAX_RET_REG_COUNT); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. 
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #endif // defined(TARGET_ARM64) { assert(iciCall->HasRetBufArg()); GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->GetNode()); // spill temp only exists if there are multiple return points if (fgNeedReturnSpillTemp()) { // if this is the first return we have seen set the retExpr if (!impInlineInfo->retExpr) { impInlineInfo->retExpr = impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType), retClsHnd, (unsigned)CHECK_SPILL_ALL); } } else { impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); } } } if (impInlineInfo->retExpr != nullptr) { impInlineInfo->retBB = compCurBB; } } } if (compIsForInlining()) { return true; } if (info.compRetType == TYP_VOID) { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } else if (info.compRetBuffArg != BAD_VAR_NUM) { // Assign value to return buff (first param) GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset())); op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX). CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // x64 (System V and Win64) calling convention requires to // return the implicit return buffer explicitly (in RAX). // Change the return type to be BYREF. op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); #else // !defined(TARGET_AMD64) // In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX). // In such case the return value of the function is changed to BYREF. // If profiler hook is not needed the return type of the function is TYP_VOID. if (compIsProfilerHookNeeded()) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #if defined(TARGET_ARM64) // On ARM64, the native instance calling convention variant // requires the implicit ByRef to be explicitly returned. else if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif #if defined(TARGET_X86) else if (info.compCallConv != CorInfoCallConvExtension::Managed) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif else { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } #endif // !defined(TARGET_AMD64) } else if (varTypeIsStruct(info.compRetType)) { #if !FEATURE_MULTIREG_RET // For both ARM architectures the HFA native types are maintained as structs. // Also on System V AMD64 the multireg structs returns are also left as structs. 
noway_assert(info.compRetNativeType != TYP_STRUCT); #endif op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); // return op2 var_types returnType = info.compRetType; op1 = gtNewOperNode(GT_RETURN, genActualType(returnType), op2); } else { // return op2 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2); } // We must have imported a tailcall and jumped to RET if (isTailCall) { assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode)); opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES // impImportCall() would have already appended TYP_VOID calls if (info.compRetType == TYP_VOID) { return true; } } impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif return true; } /***************************************************************************** * Mark the block as unimported. * Note that the caller is responsible for calling impImportBlockPending(), * with the appropriate stack-state */ inline void Compiler::impReimportMarkBlock(BasicBlock* block) { #ifdef DEBUG if (verbose && (block->bbFlags & BBF_IMPORTED)) { printf("\n" FMT_BB " will be reimported\n", block->bbNum); } #endif block->bbFlags &= ~BBF_IMPORTED; } /***************************************************************************** * Mark the successors of the given block as unimported. * Note that the caller is responsible for calling impImportBlockPending() * for all the successors, with the appropriate stack-state. */ void Compiler::impReimportMarkSuccessors(BasicBlock* block) { for (BasicBlock* const succBlock : block->Succs()) { impReimportMarkBlock(succBlock); } } /***************************************************************************** * * Filter wrapper to handle only passed in exception code * from it). */ LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam) { if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION) { return EXCEPTION_EXECUTE_HANDLER; } return EXCEPTION_CONTINUE_SEARCH; } void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart) { assert(block->hasTryIndex()); assert(!compIsForInlining()); unsigned tryIndex = block->getTryIndex(); EHblkDsc* HBtab = ehGetDsc(tryIndex); if (isTryStart) { assert(block->bbFlags & BBF_TRY_BEG); // The Stack must be empty // if (block->bbStkDepth != 0) { BADCODE("Evaluation stack must be empty on entry into a try block"); } } // Save the stack contents, we'll need to restore it later // SavedStack blockState; impSaveStackState(&blockState, false); while (HBtab != nullptr) { if (isTryStart) { // Are we verifying that an instance constructor properly initializes it's 'this' pointer once? // We do not allow the 'this' pointer to be uninitialized when entering most kinds try regions // if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init)) { // We trigger an invalid program exception here unless we have a try/fault region. // if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter()) { BADCODE( "The 'this' pointer of an instance constructor is not intialized upon entry to a try region"); } else { // Allow a try/fault region to proceed. assert(HBtab->HasFaultHandler()); } } } // Recursively process the handler block, if we haven't already done so. 
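        // ("Recursively" here means the handler is queued via impImportBlockPending and gets
        // imported later from the pending-block worklist, not imported inline at this point.)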
BasicBlock* hndBegBB = HBtab->ebdHndBeg; if (((hndBegBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(hndBegBB) == 0)) { // Construct the proper verification stack state // either empty or one that contains just // the Exception Object that we are dealing with // verCurrentState.esStackDepth = 0; if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp)) { CORINFO_CLASS_HANDLE clsHnd; if (HBtab->HasFilter()) { clsHnd = impGetObjectClass(); } else { CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = HBtab->ebdTyp; resolvedToken.tokenType = CORINFO_TOKENKIND_Class; info.compCompHnd->resolveToken(&resolvedToken); clsHnd = resolvedToken.hClass; } // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdHndBeg! hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false); } // Queue up the handler for importing // impImportBlockPending(hndBegBB); } // Process the filter block, if we haven't already done so. if (HBtab->HasFilter()) { /* @VERIFICATION : Ideally the end of filter state should get propagated to the catch handler, this is an incompleteness, but is not a security/compliance issue, since the only interesting state is the 'thisInit' state. */ BasicBlock* filterBB = HBtab->ebdFilter; if (((filterBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(filterBB) == 0)) { verCurrentState.esStackDepth = 0; // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdFilter! const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB); filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter); impImportBlockPending(filterBB); } } // This seems redundant ....?? if (verTrackObjCtorInitState && HBtab->HasFaultHandler()) { /* Recursively process the handler block */ verCurrentState.esStackDepth = 0; // Queue up the fault handler for importing // impImportBlockPending(HBtab->ebdHndBeg); } // Now process our enclosing try index (if any) // tryIndex = HBtab->ebdEnclosingTryIndex; if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { HBtab = nullptr; } else { HBtab = ehGetDsc(tryIndex); } } // Restore the stack contents impRestoreStackState(&blockState); } //*************************************************************** // Import the instructions for the given basic block. Perform // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first // time, or whose verification pre-state is changed. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif void Compiler::impImportBlock(BasicBlock* block) { // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to // handle them specially. In particular, there is no IL to import for them, but we do need // to mark them as imported and put their successors on the pending import list. 
if (block->bbFlags & BBF_INTERNAL) { JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum); block->bbFlags |= BBF_IMPORTED; for (BasicBlock* const succBlock : block->Succs()) { impImportBlockPending(succBlock); } return; } bool markImport; assert(block); /* Make the block globaly available */ compCurBB = block; #ifdef DEBUG /* Initialize the debug variables */ impCurOpcName = "unknown"; impCurOpcOffs = block->bbCodeOffs; #endif /* Set the current stack state to the merged result */ verResetCurrentState(block, &verCurrentState); /* Now walk the code and import the IL into GenTrees */ struct FilterVerificationExceptionsParam { Compiler* pThis; BasicBlock* block; }; FilterVerificationExceptionsParam param; param.pThis = this; param.block = block; PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param) { /* @VERIFICATION : For now, the only state propagation from try to it's handler is "thisInit" state (stack is empty at start of try). In general, for state that we track in verification, we need to model the possibility that an exception might happen at any IL instruction, so we really need to merge all states that obtain between IL instructions in a try block into the start states of all handlers. However we do not allow the 'this' pointer to be uninitialized when entering most kinds try regions (only try/fault are allowed to have an uninitialized this pointer on entry to the try) Fortunately, the stack is thrown away when an exception leads to a handler, so we don't have to worry about that. We DO, however, have to worry about the "thisInit" state. But only for the try/fault case. The only allowed transition is from TIS_Uninit to TIS_Init. So for a try/fault region for the fault handler block we will merge the start state of the try begin and the post-state of each block that is part of this try region */ // merge the start state of the try begin // if (pParam->block->bbFlags & BBF_TRY_BEG) { pParam->pThis->impVerifyEHBlock(pParam->block, true); } pParam->pThis->impImportBlockCode(pParam->block); // As discussed above: // merge the post-state of each block that is part of this try region // if (pParam->block->hasTryIndex()) { pParam->pThis->impVerifyEHBlock(pParam->block, false); } } PAL_EXCEPT_FILTER(FilterVerificationExceptions) { verHandleVerificationFailure(block DEBUGARG(false)); } PAL_ENDTRY if (compDonotInline()) { return; } assert(!compDonotInline()); markImport = false; SPILLSTACK: unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks bool reimportSpillClique = false; BasicBlock* tgtBlock = nullptr; /* If the stack is non-empty, we might have to spill its contents */ if (verCurrentState.esStackDepth != 0) { impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something // on the stack, its lifetime is hard to determine, simply // don't reuse such temps. Statement* addStmt = nullptr; /* Do the successors of 'block' have any other predecessors ? We do not want to do some of the optimizations related to multiRef if we can reimport blocks */ unsigned multRef = impCanReimport ? unsigned(~0) : 0; switch (block->bbJumpKind) { case BBJ_COND: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_JTRUE); /* Note if the next block has more than one ancestor */ multRef |= block->bbNext->bbRefs; /* Does the next block have temps assigned? 
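                   (i.e. is bbStkTempsIn already set because the target belongs to a spill
                   clique? If so, we must spill into those same temps below so the target
                   finds its inputs where it expects them.)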
*/ baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; if (baseTmp != NO_BASE_TMP) { break; } /* Try the target of the jump then */ multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_ALWAYS: multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_NONE: multRef |= block->bbNext->bbRefs; baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; break; case BBJ_SWITCH: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_SWITCH); for (BasicBlock* const tgtBlock : block->SwitchTargets()) { multRef |= tgtBlock->bbRefs; // Thanks to spill cliques, we should have assigned all or none assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn)); baseTmp = tgtBlock->bbStkTempsIn; if (multRef > 1) { break; } } break; case BBJ_CALLFINALLY: case BBJ_EHCATCHRET: case BBJ_RETURN: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_THROW: NO_WAY("can't have 'unreached' end of BB with non-empty stack"); break; default: noway_assert(!"Unexpected bbJumpKind"); break; } assert(multRef >= 1); /* Do we have a base temp number? */ bool newTemps = (baseTmp == NO_BASE_TMP); if (newTemps) { /* Grab enough temps for the whole stack */ baseTmp = impGetSpillTmpBase(block); } /* Spill all stack entries into temps */ unsigned level, tempNum; JITDUMP("\nSpilling stack entries into temps\n"); for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++) { GenTree* tree = verCurrentState.esStack[level].val; /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from the other. This should merge to a byref in unverifiable code. However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the successor would be imported assuming there was a TYP_I_IMPL on the stack. Thus the value would not get GC-tracked. Hence, change the temp to TYP_BYREF and reimport the successors. Note: We should only allow this in unverifiable code. */ if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL) { lvaTable[tempNum].lvType = TYP_BYREF; impReimportMarkSuccessors(block); markImport = true; } #ifdef TARGET_64BIT if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "native int". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_I_IMPL; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL) { // Spill clique has decided this should be "native int", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } // Consider the case where one branch left a 'byref' on the stack and the other leaves // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64 // behavior instead of asserting and then generating bad code (where we save/restore the // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been // imported already, we need to change the type of the local and reimport the spill clique. 
// If the 'byref' side has imported, we insert a cast from int to 'native int' to match // the 'byref' size. if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "byref". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_BYREF; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF) { // Spill clique has decided this should be "byref", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique size. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } #endif // TARGET_64BIT if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT) { // Some other block in the spill clique set this to "float", but now we have "double". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_DOUBLE; reimportSpillClique = true; } else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE) { // Spill clique has decided this should be "double", but this block only pushes a "float". // Insert a cast to "double" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE); } /* If addStmt has a reference to tempNum (can only happen if we are spilling to the temps already used by a previous block), we need to spill addStmt */ if (addStmt != nullptr && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum)) { GenTree* addTree = addStmt->GetRootNode(); if (addTree->gtOper == GT_JTRUE) { GenTree* relOp = addTree->AsOp()->gtOp1; assert(relOp->OperIsCompare()); var_types type = genActualType(relOp->AsOp()->gtOp1->TypeGet()); if (gtHasRef(relOp->AsOp()->gtOp1, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1")); impAssignTempGen(temp, relOp->AsOp()->gtOp1, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type); } if (gtHasRef(relOp->AsOp()->gtOp2, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2")); impAssignTempGen(temp, relOp->AsOp()->gtOp2, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type); } } else { assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet())); unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH")); impAssignTempGen(temp, addTree->AsOp()->gtOp1, level); addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet())); } } /* Spill the stack entry, and replace with the temp */ if (!impSpillStackEntry(level, tempNum #ifdef DEBUG , true, "Spill Stack Entry" #endif )) { if (markImport) { BADCODE("bad stack state"); } // Oops. Something went wrong when spilling. Bad code. 
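                // Note the verification failure for this block and retry the whole spill pass.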
verHandleVerificationFailure(block DEBUGARG(true)); goto SPILLSTACK; } } /* Put back the 'jtrue'/'switch' if we removed it earlier */ if (addStmt != nullptr) { impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE); } } // Some of the append/spill logic works on compCurBB assert(compCurBB == block); /* Save the tree list in the block */ impEndTreeList(block); // impEndTreeList sets BBF_IMPORTED on the block // We do *NOT* want to set it later than this because // impReimportSpillClique might clear it if this block is both a // predecessor and successor in the current spill clique assert(block->bbFlags & BBF_IMPORTED); // If we had a int/native int, or float/double collision, we need to re-import if (reimportSpillClique) { // This will re-import all the successors of block (as well as each of their predecessors) impReimportSpillClique(block); // For blocks that haven't been imported yet, we still need to mark them as pending import. for (BasicBlock* const succ : block->Succs()) { if ((succ->bbFlags & BBF_IMPORTED) == 0) { impImportBlockPending(succ); } } } else // the normal case { // otherwise just import the successors of block /* Does this block jump to any other blocks? */ for (BasicBlock* const succ : block->Succs()) { impImportBlockPending(succ); } } } #ifdef _PREFAST_ #pragma warning(pop) #endif /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Merges the current verification state into the verification state of "block" // (its "pre-state"). void Compiler::impImportBlockPending(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum); } #endif // We will add a block to the pending set if it has not already been imported (or needs to be re-imported), // or if it has, but merging in a predecessor's post-state changes the block's pre-state. // (When we're doing verification, we always attempt the merge to detect verification errors.) // If the block has not been imported, add to pending set. bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0); // Initialize bbEntryState just the first time we try to add this block to the pending list // Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set // We use NULL to indicate the 'common' state to avoid memory allocation if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) && (impGetPendingBlockMember(block) == 0)) { verInitBBEntryState(block, &verCurrentState); assert(block->bbStkDepth == 0); block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth); assert(addToPending); assert(impGetPendingBlockMember(block) == 0); } else { // The stack should have the same height on entry to the block from all its predecessors. 
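        // A mismatch here means the incoming states are inconsistent with each other, so we
        // bail out below (NO_WAY) rather than trying to reconcile the depths.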
if (block->bbStkDepth != verCurrentState.esStackDepth) { #ifdef DEBUG char buffer[400]; sprintf_s(buffer, sizeof(buffer), "Block at offset %4.4x to %4.4x in %0.200s entered with different stack depths.\n" "Previous depth was %d, current depth is %d", block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth, verCurrentState.esStackDepth); buffer[400 - 1] = 0; NO_WAY(buffer); #else NO_WAY("Block entered with different stack depths"); #endif } if (!addToPending) { return; } if (block->bbStkDepth > 0) { // We need to fix the types of any spill temps that might have changed: // int->native int, float->double, int->byref, etc. impRetypeEntryStateTemps(block); } // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_Unknown) PendingDsc; } dsc->pdBB = block; dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth; dsc->pdThisPtrInit = verCurrentState.thisInitialized; // Save the stack trees for later if (verCurrentState.esStackDepth) { impSaveStackState(&dsc->pdSavedStack, false); } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us to now to consider the block as not imported (at least for // the final time...) block->bbFlags &= ~BBF_IMPORTED; #ifdef DEBUG if (verbose && 0) { printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum); } #endif } /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block. void Compiler::impReimportBlockPending(BasicBlock* block) { JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum); assert(block->bbFlags & BBF_IMPORTED); // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_ImpStack) PendingDsc; } dsc->pdBB = block; if (block->bbEntryState) { dsc->pdThisPtrInit = block->bbEntryState->thisInitialized; dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth; dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack; } else { dsc->pdThisPtrInit = TIS_Bottom; dsc->pdSavedStack.ssDepth = 0; dsc->pdSavedStack.ssTrees = nullptr; } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us to now to consider the block as not imported (at least for // the final time...) 
block->bbFlags &= ~BBF_IMPORTED; #ifdef DEBUG if (verbose && 0) { printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum); } #endif } void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp) { if (comp->impBlockListNodeFreeList == nullptr) { return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1); } else { BlockListNode* res = comp->impBlockListNodeFreeList; comp->impBlockListNodeFreeList = res->m_next; return res; } } void Compiler::FreeBlockListNode(Compiler::BlockListNode* node) { node->m_next = impBlockListNodeFreeList; impBlockListNodeFreeList = node; } void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback) { bool toDo = true; noway_assert(!fgComputePredsDone); if (!fgCheapPredsValid) { fgComputeCheapPreds(); } BlockListNode* succCliqueToDo = nullptr; BlockListNode* predCliqueToDo = new (this) BlockListNode(block); while (toDo) { toDo = false; // Look at the successors of every member of the predecessor to-do list. while (predCliqueToDo != nullptr) { BlockListNode* node = predCliqueToDo; predCliqueToDo = node->m_next; BasicBlock* blk = node->m_blk; FreeBlockListNode(node); for (BasicBlock* const succ : blk->Succs()) { // If it's not already in the clique, add it, and also add it // as a member of the successor "toDo" set. if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0) { callback->Visit(SpillCliqueSucc, succ); impSpillCliqueSetMember(SpillCliqueSucc, succ, 1); succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo); toDo = true; } } } // Look at the predecessors of every member of the successor to-do list. while (succCliqueToDo != nullptr) { BlockListNode* node = succCliqueToDo; succCliqueToDo = node->m_next; BasicBlock* blk = node->m_blk; FreeBlockListNode(node); for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next) { BasicBlock* predBlock = pred->block; // If it's not already in the clique, add it, and also add it // as a member of the predecessor "toDo" set. if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0) { callback->Visit(SpillCliquePred, predBlock); impSpillCliqueSetMember(SpillCliquePred, predBlock, 1); predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo); toDo = true; } } } } // If this fails, it means we didn't walk the spill clique properly and somehow managed // miss walking back to include the predecessor we started from. // This most likely cause: missing or out of date bbPreds assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0); } void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliqueSucc) { assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor. blk->bbStkTempsIn = m_baseTmp; } else { assert(predOrSucc == SpillCliquePred); assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor. blk->bbStkTempsOut = m_baseTmp; } } void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) { // For Preds we could be a little smarter and just find the existing store // and re-type it/add a cast, but that is complicated and hopefully very rare, so // just re-import the whole block (just like we do for successors) if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0)) { // If we haven't imported this block and we're not going to (because it isn't on // the pending list) then just ignore it for now. 
// This block has either never been imported (EntryState == NULL) or it failed // verification. Neither state requires us to force it to be imported now. assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION)); return; } // For successors we have a valid verCurrentState, so just mark them for reimport // the 'normal' way // Unlike predecessors, we *DO* need to reimport the current block because the // initial import had the wrong entry state types. // Similarly, blocks that are currently on the pending list, still need to call // impImportBlockPending to fixup their entry state. if (predOrSucc == SpillCliqueSucc) { m_pComp->impReimportMarkBlock(blk); // Set the current stack state to that of the blk->bbEntryState m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState); assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry()); m_pComp->impImportBlockPending(blk); } else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0)) { // As described above, we are only visiting predecessors so they can // add the appropriate casts, since we have already done that for the current // block, it does not need to be reimported. // Nor do we need to reimport blocks that are still pending, but not yet // imported. // // For predecessors, we have no state to seed the EntryState, so we just have // to assume the existing one is correct. // If the block is also a successor, it will get the EntryState properly // updated when it is visited as a successor in the above "if" block. assert(predOrSucc == SpillCliquePred); m_pComp->impReimportBlockPending(blk); } } // Re-type the incoming lclVar nodes to match the varDsc. void Compiler::impRetypeEntryStateTemps(BasicBlock* blk) { if (blk->bbEntryState != nullptr) { EntryState* es = blk->bbEntryState; for (unsigned level = 0; level < es->esStackDepth; level++) { GenTree* tree = es->esStack[level].val; if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD)) { es->esStack[level].val->gtType = lvaGetDesc(tree->AsLclVarCommon())->TypeGet(); } } } } unsigned Compiler::impGetSpillTmpBase(BasicBlock* block) { if (block->bbStkTempsOut != NO_BASE_TMP) { return block->bbStkTempsOut; } #ifdef DEBUG if (verbose) { printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // Otherwise, choose one, and propagate to all members of the spill clique. // Grab enough temps for the whole stack. unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries")); SetSpillTempsBase callback(baseTmp); // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor // to one spill clique, and similarly can only be the successor to one spill clique impWalkSpillCliqueFromPred(block, &callback); return baseTmp; } void Compiler::impReimportSpillClique(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // If we get here, it is because this block is already part of a spill clique // and one predecessor had an outgoing live stack slot of type int, and this // block has an outgoing live stack slot of type native int. // We need to reset these before traversal because they have already been set // by the previous walk to determine all the members of the spill clique. 
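    // Note that both membership sets live on the inlining root (impInlineRoot()), so they are
    // reset there before walking the clique again with the ReimportSpillClique callback.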
impInlineRoot()->impSpillCliquePredMembers.Reset(); impInlineRoot()->impSpillCliqueSuccMembers.Reset(); ReimportSpillClique callback(this); impWalkSpillCliqueFromPred(block, &callback); } // Set the pre-state of "block" (which should not have a pre-state allocated) to // a copy of "srcState", cloning tree pointers as required. void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState) { if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom) { block->bbEntryState = nullptr; return; } block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1); // block->bbEntryState.esRefcount = 1; block->bbEntryState->esStackDepth = srcState->esStackDepth; block->bbEntryState->thisInitialized = TIS_Bottom; if (srcState->esStackDepth > 0) { block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]); unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry); memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize); for (unsigned level = 0; level < srcState->esStackDepth; level++) { GenTree* tree = srcState->esStack[level].val; block->bbEntryState->esStack[level].val = gtCloneExpr(tree); } } if (verTrackObjCtorInitState) { verSetThisInit(block, srcState->thisInitialized); } return; } void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis) { assert(tis != TIS_Bottom); // Precondition. if (block->bbEntryState == nullptr) { block->bbEntryState = new (this, CMK_Unknown) EntryState(); } block->bbEntryState->thisInitialized = tis; } /* * Resets the current state to the state at the start of the basic block */ void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState) { if (block->bbEntryState == nullptr) { destState->esStackDepth = 0; destState->thisInitialized = TIS_Bottom; return; } destState->esStackDepth = block->bbEntryState->esStackDepth; if (destState->esStackDepth > 0) { unsigned stackSize = destState->esStackDepth * sizeof(StackEntry); memcpy(destState->esStack, block->bbStackOnEntry(), stackSize); } destState->thisInitialized = block->bbThisOnEntry(); return; } ThisInitState BasicBlock::bbThisOnEntry() const { return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom; } unsigned BasicBlock::bbStackDepthOnEntry() const { return (bbEntryState ? 
bbEntryState->esStackDepth : 0); } void BasicBlock::bbSetStack(void* stackBuffer) { assert(bbEntryState); assert(stackBuffer); bbEntryState->esStack = (StackEntry*)stackBuffer; } StackEntry* BasicBlock::bbStackOnEntry() const { assert(bbEntryState); return bbEntryState->esStack; } void Compiler::verInitCurrentState() { verTrackObjCtorInitState = false; verCurrentState.thisInitialized = TIS_Bottom; // initialize stack info verCurrentState.esStackDepth = 0; assert(verCurrentState.esStack != nullptr); // copy current state to entry state of first BB verInitBBEntryState(fgFirstBB, &verCurrentState); } Compiler* Compiler::impInlineRoot() { if (impInlineInfo == nullptr) { return this; } else { return impInlineInfo->InlineRoot; } } BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliquePred) { return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd()); } else { assert(predOrSucc == SpillCliqueSucc); return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd()); } } void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val) { if (predOrSucc == SpillCliquePred) { impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val); } else { assert(predOrSucc == SpillCliqueSucc); impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val); } } /***************************************************************************** * * Convert the instrs ("import") into our internal format (trees). The * basic flowgraph has already been constructed and is passed in. */ void Compiler::impImport() { #ifdef DEBUG if (verbose) { printf("*************** In impImport() for %s\n", info.compFullName); } #endif Compiler* inlineRoot = impInlineRoot(); if (info.compMaxStack <= SMALL_STACK_SIZE) { impStkSize = SMALL_STACK_SIZE; } else { impStkSize = info.compMaxStack; } if (this == inlineRoot) { // Allocate the stack contents verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } else { // This is the inlinee compiler, steal the stack from the inliner compiler // (after ensuring that it is large enough). if (inlineRoot->impStkSize < impStkSize) { inlineRoot->impStkSize = impStkSize; inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } verCurrentState.esStack = inlineRoot->verCurrentState.esStack; } // initialize the entry state at start of method verInitCurrentState(); // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase). if (this == inlineRoot) // These are only used on the root of the inlining tree. { // We have initialized these previously, but to size 0. Make them larger. impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2); } inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2); impBlockListNodeFreeList = nullptr; #ifdef DEBUG impLastILoffsStmt = nullptr; impNestedStackSpill = false; #endif impBoxTemp = BAD_VAR_NUM; impPendingList = impPendingFree = nullptr; // Skip leading internal blocks. // These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects. 
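    // The loop below marks each such block as imported and then advances either to bbNext
    // (BBJ_NONE) or, for OSR methods, through the BBJ_ALWAYS jump to the OSR entry block.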
// BasicBlock* entryBlock = fgFirstBB; while (entryBlock->bbFlags & BBF_INTERNAL) { JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; if (entryBlock->bbJumpKind == BBJ_NONE) { entryBlock = entryBlock->bbNext; } else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS)) { entryBlock = entryBlock->bbJumpDest; } else { assert(!"unexpected bbJumpKind in entry sequence"); } } // Note for OSR we'd like to be able to verify this block must be // stack empty, but won't know that until we've imported...so instead // we'll BADCODE out if we mess up. // // (the concern here is that the runtime asks us to OSR a // different IL version than the one that matched the method that // triggered OSR). This should not happen but I might have the // IL versioning stuff wrong. // // TODO: we also currently expect this block to be a join point, // which we should verify over when we find jump targets. impImportBlockPending(entryBlock); /* Import blocks in the worker-list until there are no more */ while (impPendingList) { /* Remove the entry at the front of the list */ PendingDsc* dsc = impPendingList; impPendingList = impPendingList->pdNext; impSetPendingBlockMember(dsc->pdBB, 0); /* Restore the stack state */ verCurrentState.thisInitialized = dsc->pdThisPtrInit; verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth; if (verCurrentState.esStackDepth) { impRestoreStackState(&dsc->pdSavedStack); } /* Add the entry to the free list for reuse */ dsc->pdNext = impPendingFree; impPendingFree = dsc; /* Now import the block */ if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION) { verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true)); impEndTreeList(dsc->pdBB); } else { impImportBlock(dsc->pdBB); if (compDonotInline()) { return; } if (compIsForImportOnly()) { return; } } } #ifdef DEBUG if (verbose && info.compXcptnsCount) { printf("\nAfter impImport() added block for try,catch,finally"); fgDispBasicBlocks(); printf("\n"); } // Used in impImportBlockPending() for STRESS_CHK_REIMPORT for (BasicBlock* const block : Blocks()) { block->bbFlags &= ~BBF_VISITED; } #endif } // Checks if a typeinfo (usually stored in the type stack) is a struct. // The invariant here is that if it's not a ref or a method and has a class handle // it's a valuetype bool Compiler::impIsValueType(typeInfo* pTypeInfo) { if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd()) { return true; } else { return false; } } /***************************************************************************** * Check to see if the tree is the address of a local or the address of a field in a local. *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns true. */ bool Compiler::impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut) { if (tree->gtOper != GT_ADDR) { return false; } GenTree* op = tree->AsOp()->gtOp1; while (op->gtOper == GT_FIELD) { op = op->AsField()->GetFldObj(); if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL. 
{ op = op->AsOp()->gtOp1; } else { return false; } } if (op->gtOper == GT_LCL_VAR) { if (lclVarTreeOut != nullptr) { *lclVarTreeOut = op; } return true; } else { return false; } } //------------------------------------------------------------------------ // impMakeDiscretionaryInlineObservations: make observations that help // determine the profitability of a discretionary inline // // Arguments: // pInlineInfo -- InlineInfo for the inline, or null for the prejit root // inlineResult -- InlineResult accumulating information about this inline // // Notes: // If inlining or prejitting the root, this method also makes // various observations about the method that factor into inline // decisions. It sets `compNativeSizeEstimate` as a side effect. void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult) { assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining. (pInlineInfo == nullptr && !compIsForInlining()) // Calculate the static inlining hint for ngen. ); // If we're really inlining, we should just have one result in play. assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult)); // If this is a "forceinline" method, the JIT probably shouldn't have gone // to the trouble of estimating the native code size. Even if it did, it // shouldn't be relying on the result of this method. assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE); // Note if the caller contains NEWOBJ or NEWARR. Compiler* rootCompiler = impInlineRoot(); if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY); } if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ); } bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0; if (isSpecialMethod) { if (calleeIsStatic) { inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR); } else { inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR); } } else if (!calleeIsStatic) { // Callee is an instance method. // // Check if the callee has the same 'this' as the root. if (pInlineInfo != nullptr) { GenTree* thisArg = pInlineInfo->iciCall->AsCall()->gtCallThisArg->GetNode(); assert(thisArg); bool isSameThis = impIsThis(thisArg); inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis); } } bool callsiteIsGeneric = (rootCompiler->info.compMethodInfo->args.sigInst.methInstCount != 0) || (rootCompiler->info.compMethodInfo->args.sigInst.classInstCount != 0); bool calleeIsGeneric = (info.compMethodInfo->args.sigInst.methInstCount != 0) || (info.compMethodInfo->args.sigInst.classInstCount != 0); if (!callsiteIsGeneric && calleeIsGeneric) { inlineResult->Note(InlineObservation::CALLSITE_NONGENERIC_CALLS_GENERIC); } // Inspect callee's arguments (and the actual values at the callsite for them) CORINFO_SIG_INFO sig = info.compMethodInfo->args; CORINFO_ARG_LIST_HANDLE sigArg = sig.args; GenTreeCall::Use* argUse = pInlineInfo == nullptr ? nullptr : pInlineInfo->iciCall->AsCall()->gtCallArgs; for (unsigned i = 0; i < info.compMethodInfo->args.numArgs; i++) { CORINFO_CLASS_HANDLE sigClass; CorInfoType corType = strip(info.compCompHnd->getArgType(&sig, sigArg, &sigClass)); GenTree* argNode = argUse == nullptr ? 
nullptr : argUse->GetNode()->gtSkipPutArgType(); if (corType == CORINFO_TYPE_CLASS) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); } else if (corType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT); } else if (corType == CORINFO_TYPE_BYREF) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); corType = info.compCompHnd->getChildType(sigClass, &sigClass); } if (argNode != nullptr) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE argCls = gtGetClassHandle(argNode, &isExact, &isNonNull); if (argCls != nullptr) { const bool isArgValueType = eeIsValueClass(argCls); // Exact class of the arg is known if (isExact && !isArgValueType) { inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS); if ((argCls != sigClass) && (sigClass != nullptr)) { // .. but the signature accepts a less concrete type. inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS_SIG_IS_NOT); } } // Arg is a reference type in the signature and a boxed value type was passed. else if (isArgValueType && (corType == CORINFO_TYPE_CLASS)) { inlineResult->Note(InlineObservation::CALLSITE_ARG_BOXED); } } if (argNode->OperIsConst()) { inlineResult->Note(InlineObservation::CALLSITE_ARG_CONST); } argUse = argUse->GetNext(); } sigArg = info.compCompHnd->getArgNext(sigArg); } // Note if the callee's return type is a value type if (info.compMethodInfo->args.retType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_RETURNS_STRUCT); } // Note if the callee's class is a promotable struct if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0) { assert(structPromotionHelper != nullptr); if (structPromotionHelper->CanPromoteStructType(info.compClassHnd)) { inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE); } inlineResult->Note(InlineObservation::CALLEE_CLASS_VALUETYPE); } #ifdef FEATURE_SIMD // Note if this method is has SIMD args or return value if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn) { inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD); } #endif // FEATURE_SIMD // Roughly classify callsite frequency. InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED; // If this is a prejit root, or a maximally hot block... if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->isMaxBBWeight())) { frequency = InlineCallsiteFrequency::HOT; } // No training data. Look for loop-like things. // We consider a recursive call loop-like. Do not give the inlining boost to the method itself. // However, give it to things nearby. else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) && (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle)) { frequency = InlineCallsiteFrequency::LOOP; } else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT)) { frequency = InlineCallsiteFrequency::WARM; } // Now modify the multiplier based on where we're called from. else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) { frequency = InlineCallsiteFrequency::RARE; } else { frequency = InlineCallsiteFrequency::BORING; } // Also capture the block weight of the call site. // // In the prejit root case, assume at runtime there might be a hot call site // for this method, so we won't prematurely conclude this method should never // be inlined. 
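    // For illustration only (weights are hypothetical): a call site in a block with
    // bbWeight 4.0 reports CALLSITE_WEIGHT as 4, while the prejit root path below
    // substitutes a synthetic hot-caller weight of 1,000,000.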
// weight_t weight = 0; if (pInlineInfo != nullptr) { weight = pInlineInfo->iciBlock->bbWeight; } else { const weight_t prejitHotCallerWeight = 1000000.0; weight = prejitHotCallerWeight; } inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency)); inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, (int)(weight)); bool hasProfile = false; double profileFreq = 0.0; // If the call site has profile data, report the relative frequency of the site. // if ((pInlineInfo != nullptr) && rootCompiler->fgHaveSufficientProfileData()) { const weight_t callSiteWeight = pInlineInfo->iciBlock->bbWeight; const weight_t entryWeight = rootCompiler->fgFirstBB->bbWeight; profileFreq = fgProfileWeightsEqual(entryWeight, 0.0) ? 0.0 : callSiteWeight / entryWeight; hasProfile = true; assert(callSiteWeight >= 0); assert(entryWeight >= 0); } else if (pInlineInfo == nullptr) { // Simulate a hot callsite for PrejitRoot mode. hasProfile = true; profileFreq = 1.0; } inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, hasProfile); inlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, profileFreq); } /***************************************************************************** This method makes STATIC inlining decision based on the IL code. It should not make any inlining decision based on the context. If forceInline is true, then the inlining decision should not depend on performance heuristics (code size, etc.). */ void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult) { unsigned codeSize = methInfo->ILCodeSize; // We shouldn't have made up our minds yet... assert(!inlineResult->IsDecided()); if (methInfo->EHcount) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH); return; } if ((methInfo->ILCode == nullptr) || (codeSize == 0)) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // For now we don't inline varargs (import code can't handle it) if (methInfo->args.isVarArg()) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return; } // Reject if it has too many locals. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs); if (methInfo->locals.numArgs > MAX_INL_LCLS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS); return; } // Make sure there aren't too many arguments. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. 
inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs); if (methInfo->args.numArgs > MAX_INL_ARGS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS); return; } // Note force inline state inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline); // Note IL code size inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize); if (inlineResult->IsFailure()) { return; } // Make sure maxstack is not too big inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack); if (inlineResult->IsFailure()) { return; } } /***************************************************************************** */ void Compiler::impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult) { // Either EE or JIT might throw exceptions below. // If that happens, just don't inline the method. struct Param { Compiler* pThis; GenTreeCall* call; CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; CORINFO_CONTEXT_HANDLE exactContextHnd; InlineResult* result; InlineCandidateInfo** ppInlineCandidateInfo; } param; memset(&param, 0, sizeof(param)); param.pThis = this; param.call = call; param.fncHandle = fncHandle; param.methAttr = methAttr; param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle); param.result = inlineResult; param.ppInlineCandidateInfo = ppInlineCandidateInfo; bool success = eeRunWithErrorTrap<Param>( [](Param* pParam) { CorInfoInitClassResult initClassResult; #ifdef DEBUG const char* methodName; const char* className; methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className); if (JitConfig.JitNoInline()) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE); goto _exit; } #endif /* Try to get the code address/size for the method */ CORINFO_METHOD_INFO methInfo; if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo)) { pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO); goto _exit; } // Profile data allows us to avoid early "too many IL bytes" outs. pParam->result->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, pParam->pThis->fgHaveSufficientProfileData()); bool forceInline; forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE); pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result); if (pParam->result->IsFailure()) { assert(pParam->result->IsNever()); goto _exit; } // Speculatively check if initClass() can be done. // If it can be done, we will try to inline the method. initClassResult = pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */, pParam->exactContextHnd /* context */); if (initClassResult & CORINFO_INITCLASS_DONT_INLINE) { pParam->result->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT); goto _exit; } // Given the EE the final say in whether to inline or not. 
// This should be last since for verifiable code, this can be expensive /* VM Inline check also ensures that the method is verifiable if needed */ CorInfoInline vmResult; vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle); if (vmResult == INLINE_FAIL) { pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE); } else if (vmResult == INLINE_NEVER) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE); } if (pParam->result->IsFailure()) { // Make sure not to report this one. It was already reported by the VM. pParam->result->SetReported(); goto _exit; } /* Get the method properties */ CORINFO_CLASS_HANDLE clsHandle; clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle); unsigned clsAttr; clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle); /* Get the return type */ var_types fncRetType; fncRetType = pParam->call->TypeGet(); #ifdef DEBUG var_types fncRealRetType; fncRealRetType = JITtype2varType(methInfo.args.retType); assert((genActualType(fncRealRetType) == genActualType(fncRetType)) || // <BUGNUM> VSW 288602 </BUGNUM> // In case of IJW, we allow to assign a native pointer to a BYREF. (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) || (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT))); #endif // Allocate an InlineCandidateInfo structure, // // Or, reuse the existing GuardedDevirtualizationCandidateInfo, // which was pre-allocated to have extra room. // InlineCandidateInfo* pInfo; if (pParam->call->IsGuardedDevirtualizationCandidate()) { pInfo = pParam->call->gtInlineCandidateInfo; } else { pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo; // Null out bits we don't use when we're just inlining pInfo->guardedClassHandle = nullptr; pInfo->guardedMethodHandle = nullptr; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->likelihood = 0; pInfo->requiresInstMethodTableArg = false; } pInfo->methInfo = methInfo; pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd; pInfo->clsHandle = clsHandle; pInfo->exactContextHnd = pParam->exactContextHnd; pInfo->retExpr = nullptr; pInfo->preexistingSpillTemp = BAD_VAR_NUM; pInfo->clsAttr = clsAttr; pInfo->methAttr = pParam->methAttr; pInfo->initClassResult = initClassResult; pInfo->fncRetType = fncRetType; pInfo->exactContextNeedsRuntimeLookup = false; pInfo->inlinersContext = pParam->pThis->compInlineContext; // Note exactContextNeedsRuntimeLookup is reset later on, // over in impMarkInlineCandidate. *(pParam->ppInlineCandidateInfo) = pInfo; _exit:; }, &param); if (!success) { param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); } } //------------------------------------------------------------------------ // impInlineRecordArgInfo: record information about an inline candidate argument // // Arguments: // pInlineInfo - inline info for the inline candidate // curArgVal - tree for the caller actual argument value // argNum - logical index of this argument // inlineResult - result of ongoing inline evaluation // // Notes: // // Checks for various inline blocking conditions and makes notes in // the inline info arg table about the properties of the actual. These // properties are used later by impInlineFetchArg to determine how best to // pass the argument into the inlinee. 
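//
//    For illustration only (hypothetical call site): given
//
//        Foo(42, someLocal, MakeThing())
//
//    the constant 42 is typically noted as argIsInvariant, the local as
//    argIsLclVar, and the nested call as argHasSideEff; these notes later
//    determine whether the actual is substituted directly into the inlinee
//    body or spilled to a temp.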
void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult) { InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum]; inlCurArgInfo->argNode = curArgVal; // Save the original tree, with PUT_ARG and RET_EXPR. curArgVal = curArgVal->gtSkipPutArgType(); curArgVal = curArgVal->gtRetExprVal(); if (curArgVal->gtOper == GT_MKREFANY) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY); return; } GenTree* lclVarTree; const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree); if (isAddressInLocal && varTypeIsStruct(lclVarTree)) { inlCurArgInfo->argIsByRefToStructLocal = true; #ifdef FEATURE_SIMD if (lvaTable[lclVarTree->AsLclVarCommon()->GetLclNum()].lvSIMDType) { pInlineInfo->hasSIMDTypeArgLocalOrReturn = true; } #endif // FEATURE_SIMD } if (curArgVal->gtFlags & GTF_ALL_EFFECT) { inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0; inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0; } if (curArgVal->gtOper == GT_LCL_VAR) { inlCurArgInfo->argIsLclVar = true; /* Remember the "original" argument number */ INDEBUG(curArgVal->AsLclVar()->gtLclILoffs = argNum;) } if (curArgVal->IsInvariant()) { inlCurArgInfo->argIsInvariant = true; if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->AsIntCon()->gtIconVal == 0)) { // Abort inlining at this call site inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS); return; } } bool isExact = false; bool isNonNull = false; inlCurArgInfo->argIsExact = (gtGetClassHandle(curArgVal, &isExact, &isNonNull) != NO_CLASS_HANDLE) && isExact; // If the arg is a local that is address-taken, we can't safely // directly substitute it into the inlinee. // // Previously we'd accomplish this by setting "argHasLdargaOp" but // that has a stronger meaning: that the arg value can change in // the method body. Using that flag prevents type propagation, // which is safe in this case. // // Instead mark the arg as having a caller local ref. if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal)) { inlCurArgInfo->argHasCallerLocalRef = true; } #ifdef DEBUG if (verbose) { if (inlCurArgInfo->argIsThis) { printf("thisArg:"); } else { printf("\nArgument #%u:", argNum); } if (inlCurArgInfo->argIsLclVar) { printf(" is a local var"); } if (inlCurArgInfo->argIsInvariant) { printf(" is a constant"); } if (inlCurArgInfo->argHasGlobRef) { printf(" has global refs"); } if (inlCurArgInfo->argHasCallerLocalRef) { printf(" has caller local ref"); } if (inlCurArgInfo->argHasSideEff) { printf(" has side effects"); } if (inlCurArgInfo->argHasLdargaOp) { printf(" has ldarga effect"); } if (inlCurArgInfo->argHasStargOp) { printf(" has starg effect"); } if (inlCurArgInfo->argIsByRefToStructLocal) { printf(" is byref to a struct local"); } printf("\n"); gtDispTree(curArgVal); printf("\n"); } #endif } //------------------------------------------------------------------------ // impInlineInitVars: setup inline information for inlinee args and locals // // Arguments: // pInlineInfo - inline info for the inline candidate // // Notes: // This method primarily adds caller-supplied info to the inlArgInfo // and sets up the lclVarInfo table. // // For args, the inlArgInfo records properties of the actual argument // including the tree node that produces the arg value. 
This node is // usually the tree node present at the call, but may also differ in // various ways: // - when the call arg is a GT_RET_EXPR, we search back through the ret // expr chain for the actual node. Note this will either be the original // call (which will be a failed inline by this point), or the return // expression from some set of inlines. // - when argument type casting is needed the necessary casts are added // around the argument node. // - if an argument can be simplified by folding then the node here is the // folded value. // // The method may make observations that lead to marking this candidate as // a failed inline. If this happens the initialization is abandoned immediately // to try and reduce the jit time cost for a failed inline. void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) { assert(!compIsForInlining()); GenTreeCall* call = pInlineInfo->iciCall; CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo; unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr; InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo; InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo; InlineResult* inlineResult = pInlineInfo->inlineResult; // Inlined methods always use the managed calling convention const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo, CorInfoCallConvExtension::Managed); /* init the argument stuct */ memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0])); GenTreeCall::Use* thisArg = call->gtCallThisArg; unsigned argCnt = 0; // Count of the arguments assert((methInfo->args.hasThis()) == (thisArg != nullptr)); if (thisArg != nullptr) { inlArgInfo[0].argIsThis = true; impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Record some information about each of the arguments */ bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0; #if USER_ARGS_COME_LAST unsigned typeCtxtArg = (thisArg != nullptr) ? 1 : 0; #else // USER_ARGS_COME_LAST unsigned typeCtxtArg = methInfo->args.totalILArgs(); #endif // USER_ARGS_COME_LAST for (GenTreeCall::Use& use : call->Args()) { if (hasRetBuffArg && (&use == call->gtCallArgs)) { continue; } // Ignore the type context argument if (hasTypeCtxtArg && (argCnt == typeCtxtArg)) { pInlineInfo->typeContextArg = typeCtxtArg; typeCtxtArg = 0xFFFFFFFF; continue; } GenTree* actualArg = gtFoldExpr(use.GetNode()); impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Make sure we got the arg number right */ assert(argCnt == methInfo->args.totalILArgs()); #ifdef FEATURE_SIMD bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn; #endif // FEATURE_SIMD /* We have typeless opcodes, get type information from the signature */ if (thisArg != nullptr) { lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle); lclVarInfo[0].lclHasLdlocaOp = false; #ifdef FEATURE_SIMD // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase // the inlining multiplier) for anything in that assembly. // But we only need to normalize it if it is a TYP_STRUCT // (which we need to do even if we have already set foundSIMDType). 
if (!foundSIMDType && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo))) { foundSIMDType = true; } #endif // FEATURE_SIMD var_types sigType = ((clsAttr & CORINFO_FLG_VALUECLASS) != 0) ? TYP_BYREF : TYP_REF; lclVarInfo[0].lclTypeInfo = sigType; GenTree* thisArgNode = thisArg->GetNode(); assert(varTypeIsGC(thisArgNode->TypeGet()) || // "this" is managed ((thisArgNode->TypeGet() == TYP_I_IMPL) && // "this" is unmgd but the method's class doesnt care (clsAttr & CORINFO_FLG_VALUECLASS))); if (genActualType(thisArgNode->TypeGet()) != genActualType(sigType)) { if (sigType == TYP_REF) { /* The argument cannot be bashed into a ref (see bug 750871) */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF); return; } /* This can only happen with byrefs <-> ints/shorts */ assert(sigType == TYP_BYREF); assert((genActualType(thisArgNode->TypeGet()) == TYP_I_IMPL) || (thisArgNode->TypeGet() == TYP_BYREF)); lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } } /* Init the types of the arguments and make sure the types * from the trees match the types in the signature */ CORINFO_ARG_LIST_HANDLE argLst; argLst = methInfo->args.args; unsigned i; for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst)) { var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args); lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst); #ifdef FEATURE_SIMD if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo))) { // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've // found a SIMD type, even if this may not be a type we recognize (the assumption is that // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier). foundSIMDType = true; if (sigType == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle()); sigType = structType; } } #endif // FEATURE_SIMD lclVarInfo[i].lclTypeInfo = sigType; lclVarInfo[i].lclHasLdlocaOp = false; /* Does the tree type match the signature type? */ GenTree* inlArgNode = inlArgInfo[i].argNode; if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE)) { assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType)); assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType)); /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints, but in bad IL cases with caller-callee signature mismatches we can see other types. Intentionally reject cases with mismatches so the jit is more flexible when encountering bad IL. */ bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) || (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) || (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType)); if (!isPlausibleTypeMatch) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE); return; } GenTree** pInlArgNode; if (inlArgNode->OperIs(GT_PUTARG_TYPE)) { // There was a widening or narrowing cast. GenTreeUnOp* putArgType = inlArgNode->AsUnOp(); pInlArgNode = &putArgType->gtOp1; inlArgNode = putArgType->gtOp1; } else { // The same size but different type of the arguments. pInlArgNode = &inlArgInfo[i].argNode; } /* Is it a narrowing or widening cast? 
* Widening casts are ok since the value computed is already * normalized to an int (on the IL stack) */ if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType)) { if (sigType == TYP_BYREF) { lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else if (inlArgNode->gtType == TYP_BYREF) { assert(varTypeIsIntOrI(sigType)); /* If possible bash the BYREF to an int */ if (inlArgNode->IsLocalAddrExpr() != nullptr) { inlArgNode->gtType = TYP_I_IMPL; lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else { /* Arguments 'int <- byref' cannot be changed */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT); return; } } else if (genTypeSize(sigType) < TARGET_POINTER_SIZE) { // Narrowing cast. if (inlArgNode->OperIs(GT_LCL_VAR)) { const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum(); if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum)) { // We don't need to insert a cast here as the variable // was assigned a normalized value of the right type. continue; } } inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; // Try to fold the node in case we have constant arguments. if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #ifdef TARGET_64BIT else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType)) { // This should only happen for int -> native int widening inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; /* Try to fold the node in case we have constant arguments */ if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #endif // TARGET_64BIT } } } /* Init the types of the local variables */ CORINFO_ARG_LIST_HANDLE localsSig; localsSig = methInfo->locals.args; for (i = 0; i < methInfo->locals.numArgs; i++) { bool isPinned; var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned); lclVarInfo[i + argCnt].lclHasLdlocaOp = false; lclVarInfo[i + argCnt].lclTypeInfo = type; if (varTypeIsGC(type)) { if (isPinned) { JITDUMP("Inlinee local #%02u is pinned\n", i); lclVarInfo[i + argCnt].lclIsPinned = true; // Pinned locals may cause inlines to fail. inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS); if (inlineResult->IsFailure()) { return; } } pInlineInfo->numberOfGcRefLocals++; } else if (isPinned) { JITDUMP("Ignoring pin on inlinee local #%02u -- not a GC type\n", i); } lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig); // If this local is a struct type with GC fields, inform the inliner. It may choose to bail // out on the inline. if (type == TYP_STRUCT) { CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle(); DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (inlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; some policies do // not track the relative hotness of call sites for "always" inline cases. 
if (pInlineInfo->iciBlock->isRunRarely()) { inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (inlineResult->IsFailure()) { return; } } } } localsSig = info.compCompHnd->getArgNext(localsSig); #ifdef FEATURE_SIMD if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo))) { foundSIMDType = true; if (supportSIMDTypes() && type == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle()); lclVarInfo[i + argCnt].lclTypeInfo = structType; } } #endif // FEATURE_SIMD } #ifdef FEATURE_SIMD if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd)) { foundSIMDType = true; } pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType; #endif // FEATURE_SIMD } //------------------------------------------------------------------------ // impInlineFetchLocal: get a local var that represents an inlinee local // // Arguments: // lclNum -- number of the inlinee local // reason -- debug string describing purpose of the local var // // Returns: // Number of the local to use // // Notes: // This method is invoked only for locals actually used in the // inlinee body. // // Allocates a new temp if necessary, and copies key properties // over from the inlinee local var info. unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)) { assert(compIsForInlining()); unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum]; if (tmpNum == BAD_VAR_NUM) { const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt]; const var_types lclTyp = inlineeLocal.lclTypeInfo; // The lifetime of this local might span multiple BBs. // So it is a long lifetime local. impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason)); // Copy over key info lvaTable[tmpNum].lvType = lclTyp; lvaTable[tmpNum].lvHasLdAddrOp = inlineeLocal.lclHasLdlocaOp; lvaTable[tmpNum].lvPinned = inlineeLocal.lclIsPinned; lvaTable[tmpNum].lvHasILStoreOp = inlineeLocal.lclHasStlocOp; lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp; // Copy over class handle for ref types. Note this may be a // shared type -- someday perhaps we can get the exact // signature and pass in a more precise type. if (lclTyp == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp; if (lvaTable[tmpNum].lvSingleDef) { JITDUMP("Marked V%02u as a single def temp\n", tmpNum); } lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef()); } if (inlineeLocal.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); } else { // This is a wrapped primitive. Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo; } } #ifdef DEBUG // Sanity check that we're properly prepared for gc ref locals. if (varTypeIsGC(lclTyp)) { // Since there are gc locals we should have seen them earlier // and if there was a return value, set up the spill temp. assert(impInlineInfo->HasGcRefLocals()); assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp()); } else { // Make sure all pinned locals count as gc refs. 
assert(!inlineeLocal.lclIsPinned); } #endif // DEBUG } return tmpNum; } //------------------------------------------------------------------------ // impInlineFetchArg: return tree node for argument value in an inlinee // // Arguments: // lclNum -- argument number in inlinee IL // inlArgInfo -- argument info for inlinee // lclVarInfo -- var info for inlinee // // Returns: // Tree for the argument's value. Often an inlinee-scoped temp // GT_LCL_VAR but can be other tree kinds, if the argument // expression from the caller can be directly substituted into the // inlinee body. // // Notes: // Must be used only for arguments -- use impInlineFetchLocal for // inlinee locals. // // Direct substitution is performed when the formal argument cannot // change value in the inlinee body (no starg or ldarga), and the // actual argument expression's value cannot be changed if it is // substituted it into the inlinee body. // // Even if an inlinee-scoped temp is returned here, it may later be // "bashed" to a caller-supplied tree when arguments are actually // passed (see fgInlinePrependStatements). Bashing can happen if // the argument ends up being single use and other conditions are // met. So the contents of the tree returned here may not end up // being the ones ultimately used for the argument. // // This method will side effect inlArgInfo. It should only be called // for actual uses of the argument in the inlinee. GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo) { // Cache the relevant arg and lcl info for this argument. // We will modify argInfo but not lclVarInfo. InlArgInfo& argInfo = inlArgInfo[lclNum]; const InlLclVarInfo& lclInfo = lclVarInfo[lclNum]; const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp; const var_types lclTyp = lclInfo.lclTypeInfo; GenTree* op1 = nullptr; GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal(); if (argInfo.argIsInvariant && !argCanBeModified) { // Directly substitute constants or addresses of locals // // Clone the constant. Note that we cannot directly use // argNode in the trees even if !argInfo.argIsUsed as this // would introduce aliasing between inlArgInfo[].argNode and // impInlineExpr. Then gtFoldExpr() could change it, causing // further references to the argument working off of the // bashed copy. op1 = gtCloneExpr(argNode); PREFIX_ASSUME(op1 != nullptr); argInfo.argTmpNum = BAD_VAR_NUM; // We may need to retype to ensure we match the callee's view of the type. // Otherwise callee-pass throughs of arguments can create return type // mismatches that block inlining. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. if (op1->TypeGet() != lclTyp) { op1->gtType = genActualType(lclTyp); } } else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef) { // Directly substitute unaliased caller locals for args that cannot be modified // // Use the caller-supplied node if this is the first use. op1 = argNode; unsigned argLclNum = op1->AsLclVarCommon()->GetLclNum(); argInfo.argTmpNum = argLclNum; // Use an equivalent copy if this is the second or subsequent // use. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. If inlining is not prevented // but a cast is necessary, we similarly expect it to have been inserted then. // So here we may have argument type mismatches that are benign, for instance // passing a TYP_SHORT local (eg. 
normalized-on-load) as a TYP_INT arg. // The exception is when the inlining means we should start tracking the argument. if (argInfo.argIsUsed || ((lclTyp == TYP_BYREF) && (op1->TypeGet() != TYP_BYREF))) { assert(op1->gtOper == GT_LCL_VAR); assert(lclNum == op1->AsLclVar()->gtLclILoffs); // Create a new lcl var node - remember the argument lclNum op1 = impCreateLocalNode(argLclNum DEBUGARG(op1->AsLclVar()->gtLclILoffs)); // Start tracking things as a byref if the parameter is a byref. if (lclTyp == TYP_BYREF) { op1->gtType = TYP_BYREF; } } } else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp) { /* Argument is a by-ref address to a struct, a normed struct, or its field. In these cases, don't spill the byref to a local, simply clone the tree and use it. This way we will increase the chance for this byref to be optimized away by a subsequent "dereference" operation. From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal. For example, if the caller is: ldloca.s V_1 // V_1 is a local struct call void Test.ILPart::RunLdargaOnPointerArg(int32*) and the callee being inlined has: .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed ldarga.s ptrToInts call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**) then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR. */ assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL); op1 = gtCloneExpr(argNode); } else { /* Argument is a complex expression - it must be evaluated into a temp */ if (argInfo.argHasTmp) { assert(argInfo.argIsUsed); assert(argInfo.argTmpNum < lvaCount); /* Create a new lcl var node - remember the argument lclNum */ op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp)); /* This is the second or later use of the this argument, so we have to use the temp (instead of the actual arg) */ argInfo.argBashTmpNode = nullptr; } else { /* First time use */ assert(!argInfo.argIsUsed); /* Reserve a temp for the expression. * Use a large size node as we may change it later */ const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg")); lvaTable[tmpNum].lvType = lclTyp; // For ref types, determine the type of the temp. if (lclTyp == TYP_REF) { if (!argCanBeModified) { // If the arg can't be modified in the method // body, use the type of the value, if // known. Otherwise, use the declared type. assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmpNum); lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } else { // Arg might be modified, use the declared type of // the argument. lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } } assert(!lvaTable[tmpNum].IsAddressExposed()); if (argInfo.argHasLdargaOp) { lvaTable[tmpNum].lvHasLdAddrOp = 1; } if (lclInfo.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(tmpNum); } } else { // This is a wrapped primitive. 
Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo; } } argInfo.argHasTmp = true; argInfo.argTmpNum = tmpNum; // If we require strict exception order, then arguments must // be evaluated in sequence before the body of the inlined method. // So we need to evaluate them to a temp. // Also, if arguments have global or local references, we need to // evaluate them to a temp before the inlined body as the // inlined body may be modifying the global ref. // TODO-1stClassStructs: We currently do not reuse an existing lclVar // if it is a struct, because it requires some additional handling. if ((!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef && !argInfo.argHasCallerLocalRef)) { /* Get a *LARGE* LCL_VAR node */ op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp) DEBUGARG(lclNum)); /* Record op1 as the very first use of this argument. If there are no further uses of the arg, we may be able to use the actual arg node instead of the temp. If we do see any further uses, we will clear this. */ argInfo.argBashTmpNode = op1; } else { /* Get a small LCL_VAR node */ op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp)); /* No bashing of this argument */ argInfo.argBashTmpNode = nullptr; } } } // Mark this argument as used. argInfo.argIsUsed = true; return op1; } /****************************************************************************** Is this the original "this" argument to the call being inlined? Note that we do not inline methods with "starg 0", and so we do not need to worry about it. */ bool Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); return (tree->gtOper == GT_LCL_VAR && tree->AsLclVarCommon()->GetLclNum() == inlArgInfo[0].argTmpNum); } //----------------------------------------------------------------------------- // impInlineIsGuaranteedThisDerefBeforeAnySideEffects: Check if a dereference in // the inlinee can guarantee that the "this" pointer is non-NULL. // // Arguments: // additionalTree - a tree to check for side effects // additionalCallArgs - a list of call args to check for side effects // dereferencedAddress - address expression being dereferenced // inlArgInfo - inlinee argument information // // Notes: // If we haven't hit a branch or a side effect, and we are dereferencing // from 'this' to access a field or make GTF_CALL_NULLCHECK call, // then we can avoid a separate null pointer check. // // The importer stack and current statement list are searched for side effects. // Trees that have been popped of the stack but haven't been appended to the // statement list and have to be checked for side effects may be provided via // additionalTree and additionalCallArgs. 
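//
//    For illustration only (hypothetical inlinee): when inlining
//
//        int GetX() => this.x;
//
//    the first observable operation is the field load off 'this' in the first
//    block, so a separate null check on 'this' can be omitted; any earlier
//    side effect found on the importer stack or statement list defeats the
//    guarantee.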
// bool Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); assert(opts.OptEnabled(CLFLG_INLINING)); BasicBlock* block = compCurBB; if (block != fgFirstBB) { return false; } if (!impInlineIsThis(dereferencedAddress, inlArgInfo)) { return false; } if ((additionalTree != nullptr) && GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTree->gtFlags)) { return false; } for (GenTreeCall::Use& use : GenTreeCall::UseList(additionalCallArgs)) { if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(use.GetNode()->gtFlags)) { return false; } } for (Statement* stmt : StatementList(impStmtList)) { GenTree* expr = stmt->GetRootNode(); if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags)) { return false; } } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTreeFlags stackTreeFlags = verCurrentState.esStack[level].val->gtFlags; if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags)) { return false; } } return true; } //------------------------------------------------------------------------ // impMarkInlineCandidate: determine if this call can be subsequently inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // Mostly a wrapper for impMarkInlineCandidateHelper that also undoes // guarded devirtualization for virtual calls where the method we'd // devirtualize to cannot be inlined. void Compiler::impMarkInlineCandidate(GenTree* callNode, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { GenTreeCall* call = callNode->AsCall(); // Do the actual evaluation impMarkInlineCandidateHelper(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); // If this call is an inline candidate or is not a guarded devirtualization // candidate, we're done. if (call->IsInlineCandidate() || !call->IsGuardedDevirtualizationCandidate()) { return; } // If we can't inline the call we'd guardedly devirtualize to, // we undo the guarded devirtualization, as the benefit from // just guarded devirtualization alone is likely not worth the // extra jit time and code size. // // TODO: it is possibly interesting to allow this, but requires // fixes elsewhere too... JITDUMP("Revoking guarded devirtualization candidacy for call [%06u]: target method can't be inlined\n", dspTreeID(call)); call->ClearGuardedDevirtualizationCandidate(); } //------------------------------------------------------------------------ // impMarkInlineCandidateHelper: determine if this call can be subsequently // inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // If callNode is an inline candidate, this method sets the flag // GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have // filled in the associated InlineCandidateInfo. // // If callNode is not an inline candidate, and the reason is // something that is inherent to the method being called, the // method may be marked as "noinline" to short-circuit any // future assessments of calls to this method. 
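//
//    For illustration only (hypothetical C#): in
//
//        int Sum(int n, int acc) => n == 0 ? acc : Sum(n - 1, acc + n);
//
//    the recursive call is an implicit tail call, so candidacy may be rejected
//    with CALLSITE_IMPLICIT_REC_TAIL_CALL and the call left for tail-recursion
//    elimination instead.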
void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { // Let the strategy know there's another call impInlineRoot()->m_inlineStrategy->NoteCall(); if (!opts.OptEnabled(CLFLG_INLINING)) { /* XXX Mon 8/18/2008 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and * figure out why we did not set MAXOPT for this compile. */ assert(!compIsForInlining()); return; } if (compIsForImportOnly()) { // Don't bother creating the inline candidate during verification. // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification // that leads to the creation of multiple instances of Compiler. return; } InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate"); // Don't inline if not optimizing root method if (opts.compDbgCode) { inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN); return; } // Don't inline if inlining into this method is disabled. if (impInlineRoot()->m_inlineStrategy->IsInliningDisabled()) { inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE); return; } // Don't inline into callers that use the NextCallReturnAddress intrinsic. if (info.compHasNextCallRetAddr) { inlineResult.NoteFatal(InlineObservation::CALLER_USES_NEXT_CALL_RET_ADDR); return; } // Inlining candidate determination needs to honor only IL tail prefix. // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive). if (call->IsTailPrefixedCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX); return; } // Delegate Invoke method doesn't have a body and gets special cased instead. // Don't even bother trying to inline it. if (call->IsDelegateInvoke()) { inlineResult.NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // Tail recursion elimination takes precedence over inlining. // TODO: We may want to do some of the additional checks from fgMorphCall // here to reduce the chance we don't inline a call that won't be optimized // as a fast tail call or turned into a loop. if (gtIsRecursiveCall(call) && call->IsImplicitTailCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL); return; } if (call->IsVirtual()) { // Allow guarded devirt calls to be treated as inline candidates, // but reject all other virtual calls. if (!call->IsGuardedDevirtualizationCandidate()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT); return; } } /* Ignore helper calls */ if (call->gtCallType == CT_HELPER) { assert(!call->IsGuardedDevirtualizationCandidate()); inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER); return; } /* Ignore indirect calls */ if (call->gtCallType == CT_INDIRECT) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED); return; } /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding * inlining in throw blocks. I should consider the same thing for catch and filter regions. 
*/ CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; if (call->IsGuardedDevirtualizationCandidate()) { if (call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle != nullptr) { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle; } else { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodHandle; } methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } else { fncHandle = call->gtCallMethHnd; // Reuse method flags from the original callInfo if possible if (fncHandle == callInfo->hMethod) { methAttr = callInfo->methodFlags; } else { methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } } #ifdef DEBUG if (compStressCompile(STRESS_FORCE_INLINE, 0)) { methAttr |= CORINFO_FLG_FORCEINLINE; } #endif // Check for COMPlus_AggressiveInlining if (compDoAggressiveInlining) { methAttr |= CORINFO_FLG_FORCEINLINE; } if (!(methAttr & CORINFO_FLG_FORCEINLINE)) { /* Don't bother inline blocks that are in the filter region */ if (bbInCatchHandlerILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the catch handler region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH); return; } if (bbInFilterILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the filter region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER); return; } } /* Check if we tried to inline this method before */ if (methAttr & CORINFO_FLG_DONT_INLINE) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE); return; } /* Cannot inline synchronized methods */ if (methAttr & CORINFO_FLG_SYNCH) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED); return; } /* Check legality of PInvoke callsite (for inlining of marshalling code) */ if (methAttr & CORINFO_FLG_PINVOKE) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (!impCanPInvokeInlineCallSite(block)) { inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH); return; } } InlineCandidateInfo* inlineCandidateInfo = nullptr; impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult); if (inlineResult.IsFailure()) { return; } // The old value should be null OR this call should be a guarded devirtualization candidate. assert((call->gtInlineCandidateInfo == nullptr) || call->IsGuardedDevirtualizationCandidate()); // The new value should not be null. assert(inlineCandidateInfo != nullptr); inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup; call->gtInlineCandidateInfo = inlineCandidateInfo; // If we're in an inlinee compiler, and have a return spill temp, and this inline candidate // is also a tail call candidate, it can use the same return spill temp. // if (compIsForInlining() && call->CanTailCall() && (impInlineInfo->inlineCandidateInfo->preexistingSpillTemp != BAD_VAR_NUM)) { inlineCandidateInfo->preexistingSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp; JITDUMP("Inline candidate [%06u] can share spill temp V%02u\n", dspTreeID(call), inlineCandidateInfo->preexistingSpillTemp); } // Mark the call node as inline candidate. call->gtFlags |= GTF_CALL_INLINE_CANDIDATE; // Let the strategy know there's another candidate. 
impInlineRoot()->m_inlineStrategy->NoteCandidate(); // Since we're not actually inlining yet, and this call site is // still just an inline candidate, there's nothing to report. inlineResult.SetReported(); } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by target-specific // instructions bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName) { #if defined(TARGET_XARCH) switch (intrinsicName) { // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1 // instructions to directly compute round/ceiling/floor/truncate. case NI_System_Math_Abs: case NI_System_Math_Sqrt: return true; case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: return compOpportunisticallyDependsOn(InstructionSet_SSE41); case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_FMA); default: return false; } #elif defined(TARGET_ARM64) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: case NI_System_Math_Sqrt: case NI_System_Math_Max: case NI_System_Math_Min: return true; case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_AdvSimd); default: return false; } #elif defined(TARGET_ARM) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Round: case NI_System_Math_Sqrt: return true; default: return false; } #else // TODO: This portion of logic is not implemented for other arch. // The reason for returning true is that on all other arch the only intrinsic // enabled are target intrinsics. return true; #endif } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by calling System.Math // methods. bool Compiler::IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName) { // Currently, if a math intrinsic is not implemented by target-specific // instructions, it will be implemented by a System.Math call. In the // future, if we turn to implementing some of them with helper calls, // this predicate needs to be revisited. 
return !IsTargetIntrinsic(intrinsicName); } bool Compiler::IsMathIntrinsic(NamedIntrinsic intrinsicName) { switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_FusedMultiplyAdd: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: case NI_System_Math_Max: case NI_System_Math_Min: case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: case NI_System_Math_Truncate: { assert((intrinsicName > NI_SYSTEM_MATH_START) && (intrinsicName < NI_SYSTEM_MATH_END)); return true; } default: { assert((intrinsicName < NI_SYSTEM_MATH_START) || (intrinsicName > NI_SYSTEM_MATH_END)); return false; } } } bool Compiler::IsMathIntrinsic(GenTree* tree) { return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->AsIntrinsic()->gtIntrinsicName); } //------------------------------------------------------------------------ // impDevirtualizeCall: Attempt to change a virtual vtable call into a // normal call // // Arguments: // call -- the call node to examine/modify // pResolvedToken -- [IN] the resolved token used to create the call. Used for R2R. // method -- [IN/OUT] the method handle for call. Updated iff call devirtualized. // methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized. // pContextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized. // pExactContextHandle -- [OUT] updated context handle iff call devirtualized // isLateDevirtualization -- if devirtualization is happening after importation // isExplicitTailCalll -- [IN] true if we plan on using an explicit tail call // ilOffset -- IL offset of the call // // Notes: // Virtual calls in IL will always "invoke" the base class method. // // This transformation looks for evidence that the type of 'this' // in the call is exactly known, is a final class or would invoke // a final method, and if that and other safety checks pan out, // modifies the call and the call info to create a direct call. // // This transformation is initially done in the importer and not // in some subsequent optimization pass because we want it to be // upstream of inline candidate identification. // // However, later phases may supply improved type information that // can enable further devirtualization. We currently reinvoke this // code after inlining, if the return value of the inlined call is // the 'this obj' of a subsequent virtual call. // // If devirtualization succeeds and the call's this object is a // (boxed) value type, the jit will ask the EE for the unboxed entry // point. If this exists, the jit will invoke the unboxed entry // on the box payload. In addition if the boxing operation is // visible to the jit and the call is the only consmer of the box, // the jit will try analyze the box to see if the call can be instead // instead made on a local copy. If that is doable, the call is // updated to invoke the unboxed entry on the local copy and the // boxing operation is removed. 
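//
//    For illustration only (hypothetical C#): given
//
//        Base b = new Derived();   // exact type of 'b' is known here
//        b.M();                    // IL: callvirt instance void Base::M()
//
//    the callvirt can be rewritten as a direct call to Derived::M, which can
//    then be considered as an inline candidate.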
// // When guarded devirtualization is enabled, this method will mark // calls as guarded devirtualization candidates, if the type of `this` // is not exactly known, and there is a plausible guess for the type. void Compiler::impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* pContextHandle, CORINFO_CONTEXT_HANDLE* pExactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset) { assert(call != nullptr); assert(method != nullptr); assert(methodFlags != nullptr); assert(pContextHandle != nullptr); // This should be a virtual vtable or virtual stub call. // assert(call->IsVirtual()); // Possibly instrument. Note for OSR+PGO we will instrument when // optimizing and (currently) won't devirtualize. We may want // to revisit -- if we can devirtualize we should be able to // suppress the probe. // // We strip BBINSTR from inlinees currently, so we'll only // do this for the root method calls. // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { assert(opts.OptimizationDisabled() || opts.IsOSR()); assert(!compIsForInlining()); // During importation, optionally flag this block as one that // contains calls requiring class profiling. Ideally perhaps // we'd just keep track of the calls themselves, so we don't // have to search for them later. // if ((call->gtCallType != CT_INDIRECT) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && (JitConfig.JitClassProfiling() > 0) && !isLateDevirtualization) { JITDUMP("\n ... marking [%06u] in " FMT_BB " for class profile instrumentation\n", dspTreeID(call), compCurBB->bbNum); ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; // Record some info needed for the class profiling probe. // pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; // Flag block as needing scrutiny // compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return; } // Bail if optimizations are disabled. if (opts.OptimizationDisabled()) { return; } #if defined(DEBUG) // Bail if devirt is disabled. if (JitConfig.JitEnableDevirtualization() == 0) { return; } // Optionally, print info on devirtualization Compiler* const rootCompiler = impInlineRoot(); const bool doPrint = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodName, rootCompiler->info.compClassName, &rootCompiler->info.compMethodInfo->args); #endif // DEBUG // Fetch information about the virtual method we're calling. CORINFO_METHOD_HANDLE baseMethod = *method; unsigned baseMethodAttribs = *methodFlags; if (baseMethodAttribs == 0) { // For late devirt we may not have method attributes, so fetch them. baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); } else { #if defined(DEBUG) // Validate that callInfo has up to date method flags const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); // All the base method attributes should agree, save that // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1 // because of concurrent jitting activity. // // Note we don't look at this particular flag bit below, and // later on (if we do try and inline) we will rediscover why // the method can't be inlined, so there's no danger here in // seeing this particular flag bit in different states between // the cached and fresh values. 
if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE)) { assert(!"mismatched method attributes"); } #endif // DEBUG } // In R2R mode, we might see virtual stub calls to // non-virtuals. For instance cases where the non-virtual method // is in a different assembly but is called via CALLVIRT. For // verison resilience we must allow for the fact that the method // might become virtual in some update. // // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a // regular call+nullcheck upstream, so we won't reach this // point. if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0) { assert(call->IsVirtualStub()); assert(opts.IsReadyToRun()); JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n"); return; } // Fetch information about the class that introduced the virtual method. CORINFO_CLASS_HANDLE baseClass = info.compCompHnd->getMethodClass(baseMethod); const DWORD baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass); // Is the call an interface call? const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0; // See what we know about the type of 'this' in the call. GenTree* thisObj = call->gtCallThisArg->GetNode()->gtEffectiveVal(false); bool isExact = false; bool objIsNonNull = false; CORINFO_CLASS_HANDLE objClass = gtGetClassHandle(thisObj, &isExact, &objIsNonNull); // Bail if we know nothing. if (objClass == NO_CLASS_HANDLE) { JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet())); // Don't try guarded devirtualiztion when we're doing late devirtualization. // if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG("unknown")); return; } // If the objClass is sealed (final), then we may be able to devirtualize. const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass); const bool objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0; #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; const char* objClassNote = "[?]"; const char* objClassName = "?objClass"; const char* baseClassName = "?baseClass"; const char* baseMethodName = "?baseMethod"; if (verbose || doPrint) { objClassNote = isExact ? " [exact]" : objClassIsFinal ? " [final]" : ""; objClassName = info.compCompHnd->getClassName(objClass); baseClassName = info.compCompHnd->getClassName(baseClass); baseMethodName = eeGetMethodName(baseMethod, nullptr); if (verbose) { printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n" " class for 'this' is %s%s (attrib %08x)\n" " base method is %s::%s\n", callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName); } } #endif // defined(DEBUG) // See if the jit's best type for `obj` is an interface. // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal // IL_021d: ldloc.0 // IL_021e: callvirt instance int32 System.Object::GetHashCode() // // If so, we can't devirtualize, but we may be able to do guarded devirtualization. // if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0) { // Don't try guarded devirtualiztion when we're doing late devirtualization. 
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // If we get this far, the jit has a lower bound class type for the `this` object being used for dispatch. // It may or may not know enough to devirtualize... if (isInterface) { assert(call->IsVirtualStub()); JITDUMP("--- base class is interface\n"); } // Fetch the method that would be called based on the declared type of 'this', // and prepare to fetch the method attributes. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = objClass; dvInfo.context = *pContextHandle; dvInfo.detail = CORINFO_DEVIRTUALIZATION_UNKNOWN; dvInfo.pResolvedTokenVirtualMethod = pResolvedToken; info.compCompHnd->resolveVirtualMethod(&dvInfo); CORINFO_METHOD_HANDLE derivedMethod = dvInfo.devirtualizedMethod; CORINFO_CONTEXT_HANDLE exactContext = dvInfo.exactContext; CORINFO_CLASS_HANDLE derivedClass = NO_CLASS_HANDLE; CORINFO_RESOLVED_TOKEN* pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedMethod; if (derivedMethod != nullptr) { assert(exactContext != nullptr); assert(((size_t)exactContext & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); derivedClass = (CORINFO_CLASS_HANDLE)((size_t)exactContext & ~CORINFO_CONTEXTFLAGS_MASK); } DWORD derivedMethodAttribs = 0; bool derivedMethodIsFinal = false; bool canDevirtualize = false; #if defined(DEBUG) const char* derivedClassName = "?derivedClass"; const char* derivedMethodName = "?derivedMethod"; const char* note = "inexact or not final"; #endif // If we failed to get a method handle, we can't directly devirtualize. // // This can happen when prejitting, if the devirtualization crosses // servicing bubble boundaries, or if objClass is a shared class. // if (derivedMethod == nullptr) { JITDUMP("--- no derived method: %s\n", devirtualizationDetailToString(dvInfo.detail)); } else { // Fetch method attributes to see if method is marked final. derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod); derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0); #if defined(DEBUG) if (isExact) { note = "exact"; } else if (objClassIsFinal) { note = "final class"; } else if (derivedMethodIsFinal) { note = "final method"; } if (verbose || doPrint) { derivedMethodName = eeGetMethodName(derivedMethod, nullptr); derivedClassName = eeGetClassName(derivedClass); if (verbose) { printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note); gtDispTree(call); } } #endif // defined(DEBUG) canDevirtualize = isExact || objClassIsFinal || (!isInterface && derivedMethodIsFinal); } // We still might be able to do a guarded devirtualization. // Note the call might be an interface call or a virtual call. // if (!canDevirtualize) { JITDUMP(" Class not final or exact%s\n", isInterface ? "" : ", and method not final"); #if defined(DEBUG) // If we know the object type exactly, we generally expect we can devirtualize. // (don't when doing late devirt as we won't have an owner type (yet)) // if (!isLateDevirtualization && (isExact || objClassIsFinal) && JitConfig.JitNoteFailedExactDevirtualization()) { printf("@@@ Exact/Final devirt failure in %s at [%06u] $ %s\n", info.compFullName, dspTreeID(call), devirtualizationDetailToString(dvInfo.detail)); } #endif // Don't try guarded devirtualiztion if we're doing late devirtualization. 
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // All checks done. Time to transform the call. // // We should always have an exact class context. // // Note that wouldnt' be true if the runtime side supported array interface devirt, // the resulting method would be a generic method of the non-generic SZArrayHelper class. // assert(canDevirtualize); JITDUMP(" %s; can devirtualize\n", note); // Make the updates. call->gtFlags &= ~GTF_CALL_VIRT_VTABLE; call->gtFlags &= ~GTF_CALL_VIRT_STUB; call->gtCallMethHnd = derivedMethod; call->gtCallType = CT_USER_FUNC; call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED; // Virtual calls include an implicit null check, which we may // now need to make explicit. if (!objIsNonNull) { call->gtFlags |= GTF_CALL_NULLCHECK; } // Clear the inline candidate info (may be non-null since // it's a union field used for other things by virtual // stubs) call->gtInlineCandidateInfo = nullptr; #if defined(DEBUG) if (verbose) { printf("... after devirt...\n"); gtDispTree(call); } if (doPrint) { printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName, baseMethodName, derivedClassName, derivedMethodName, note); } // If we successfully devirtualized based on an exact or final class, // and we have dynamic PGO data describing the likely class, make sure they agree. // // If pgo source is not dynamic we may see likely classes from other versions of this code // where types had different properties. // // If method is an inlinee we may be specializing to a class that wasn't seen at runtime. // const bool canSensiblyCheck = (isExact || objClassIsFinal) && (fgPgoSource == ICorJitInfo::PgoSource::Dynamic) && !compIsForInlining(); if (JitConfig.JitCrossCheckDevirtualizationAndPGO() && canSensiblyCheck) { // We only can handle a single likely class for now const int maxLikelyClasses = 1; LikelyClassRecord likelyClasses[maxLikelyClasses]; UINT32 numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); UINT32 likelihood = likelyClasses[0].likelihood; CORINFO_CLASS_HANDLE likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses > 0) { // PGO had better agree the class we devirtualized to is plausible. // if (likelyClass != derivedClass) { // Managed type system may report different addresses for a class handle // at different times....? // // Also, AOT may have a more nuanced notion of class equality. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { bool mismatch = true; // derivedClass will be the introducer of derived method, so it's possible // likelyClass is a non-overriding subclass. Check up the hierarchy. 
// CORINFO_CLASS_HANDLE parentClass = likelyClass; while (parentClass != NO_CLASS_HANDLE) { if (parentClass == derivedClass) { mismatch = false; break; } parentClass = info.compCompHnd->getParentType(parentClass); } if (mismatch || (numberOfClasses != 1) || (likelihood != 100)) { printf("@@@ Likely %p (%s) != Derived %p (%s) [n=%u, l=%u, il=%u] in %s \n", likelyClass, eeGetClassName(likelyClass), derivedClass, eeGetClassName(derivedClass), numberOfClasses, likelihood, ilOffset, info.compFullName); } assert(!(mismatch || (numberOfClasses != 1) || (likelihood != 100))); } } } } #endif // defined(DEBUG) // If the 'this' object is a value class, see if we can rework the call to invoke the // unboxed entry. This effectively inlines the normally un-inlineable wrapper stub // and exposes the potentially inlinable unboxed entry method. // // We won't optimize explicit tail calls, as ensuring we get the right tail call info // is tricky (we'd need to pass an updated sig and resolved token back to some callers). // // Note we may not have a derived class in some cases (eg interface call on an array) // if (info.compCompHnd->isValueClass(derivedClass)) { if (isExplicitTailCall) { JITDUMP("Have a direct explicit tail call to boxed entry point; can't optimize further\n"); } else { JITDUMP("Have a direct call to boxed entry point. Trying to optimize to call an unboxed entry point\n"); // Note for some shared methods the unboxed entry point requires an extra parameter. bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethod = info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg); if (unboxedEntryMethod != nullptr) { bool optimizedTheBox = false; // If the 'this' object is a local box, see if we can revise things // to not require boxing. // if (thisObj->IsBoxedValue() && !isExplicitTailCall) { // Since the call is the only consumer of the box, we know the box can't escape // since it is being passed an interior pointer. // // So, revise the box to simply create a local copy, use the address of that copy // as the this pointer, and update the entry point to the unboxed entry. // // Ideally, we then inline the boxed method and and if it turns out not to modify // the copy, we can undo the copy too. if (requiresInstMethodTableArg) { // Perform a trial box removal and ask for the type handle tree that fed the box. // JITDUMP("Unboxed entry needs method table arg...\n"); GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE); if (methodTableArg != nullptr) { // If that worked, turn the box into a copy to a local var // JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg)); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { // Pass the local var as this and the type handle as a new arg // JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table " "arg\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. 
// if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } call->gtCallMethHnd = unboxedEntryMethod; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethodAttribs = unboxedMethodAttribs; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n"); } } else { JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n"); } } else { JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n"); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { JITDUMP("Success! invoking unboxed entry point on local copy\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box\n"); } } if (optimizedTheBox) { #if FEATURE_TAILCALL_OPT if (call->IsImplicitTailCall()) { JITDUMP("Clearing the implicit tail call flag\n"); // If set, we clear the implicit tail call flag // as we just introduced a new address taken local variable // call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; } #endif // FEATURE_TAILCALL_OPT } } if (!optimizedTheBox) { // If we get here, we have a boxed value class that either wasn't boxed // locally, or was boxed locally but we were unable to remove the box for // various reasons. // // We can still update the call to invoke the unboxed entry, if the // boxed value is simple. // if (requiresInstMethodTableArg) { // Get the method table from the boxed object. // GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const clonedThisArg = gtClone(thisArg); if (clonedThisArg == nullptr) { JITDUMP( "unboxed entry needs MT arg, but `this` was too complex to clone. Deferring update.\n"); } else { JITDUMP("revising call to invoke unboxed entry with additional method table arg\n"); GenTree* const methodTableArg = gtNewMethodTableLookup(clonedThisArg); // Update the 'this' pointer to refer to the box payload // GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; derivedMethodAttribs = unboxedMethodAttribs; // Add the method table argument. 
// // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. // if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } } } else { JITDUMP("revising call to invoke unboxed entry\n"); GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; } } } else { // Many of the low-level methods on value classes won't have unboxed entries, // as they need access to the type of the object. // // Note this may be a cue for us to stack allocate the boxed object, since // we probably know that these objects don't escape. JITDUMP("Sorry, failed to find unboxed entry point\n"); } } } // Need to update call info too. // *method = derivedMethod; *methodFlags = derivedMethodAttribs; // Update context handle // *pContextHandle = MAKE_METHODCONTEXT(derivedMethod); // Update exact context handle. // if (pExactContextHandle != nullptr) { *pExactContextHandle = MAKE_CLASSCONTEXT(derivedClass); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // For R2R, getCallInfo triggers bookkeeping on the zap // side and acquires the actual symbol to call so we need to call it here. // Look up the new call info. CORINFO_CALL_INFO derivedCallInfo; eeGetCallInfo(pDerivedResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, &derivedCallInfo); // Update the call. call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT; call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT; call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup); } #endif // FEATURE_READYTORUN } //------------------------------------------------------------------------ // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call // to an intrinsic returns an exact type // // Arguments: // methodHnd -- handle for the special intrinsic method // // Returns: // Exact class handle returned by the intrinsic call, if known. // Nullptr if not known, or not likely to lead to beneficial optimization. CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd) { JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd)); CORINFO_CLASS_HANDLE result = nullptr; // See what intrinisc we have... const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd); switch (ni) { case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: { // Expect one class generic parameter; figure out which it is. 
CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(methodHnd, &sig); assert(sig.sigInst.classInstCount == 1); CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0]; assert(typeHnd != nullptr); // Lookup can incorrect when we have __Canon as it won't appear // to implement any interface types. // // And if we do not have a final type, devirt & inlining is // unlikely to result in much simplification. // // We can use CORINFO_FLG_FINAL to screen out both of these cases. const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd); const bool isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0); if (isFinalType) { if (ni == NI_System_Collections_Generic_EqualityComparer_get_Default) { result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd); } else { assert(ni == NI_System_Collections_Generic_Comparer_get_Default); result = info.compCompHnd->getDefaultComparerClass(typeHnd); } JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd), result != nullptr ? eeGetClassName(result) : "unknown"); } else { JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd)); } break; } default: { JITDUMP("This special intrinsic not handled, sorry...\n"); break; } } return result; } //------------------------------------------------------------------------ // impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it. // // Arguments: // token - init value for the allocated token. // // Return Value: // pointer to token into jit-allocated memory. CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(const CORINFO_RESOLVED_TOKEN& token) { CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1); *memory = token; return memory; } //------------------------------------------------------------------------ // SpillRetExprHelper: iterate through arguments tree and spill ret_expr to local variables. // class SpillRetExprHelper { public: SpillRetExprHelper(Compiler* comp) : comp(comp) { } void StoreRetExprResultsInArgs(GenTreeCall* call) { for (GenTreeCall::Use& use : call->Args()) { comp->fgWalkTreePre(&use.NodeRef(), SpillRetExprVisitor, this); } if (call->gtCallThisArg != nullptr) { comp->fgWalkTreePre(&call->gtCallThisArg->NodeRef(), SpillRetExprVisitor, this); } } private: static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre) { assert((pTree != nullptr) && (*pTree != nullptr)); GenTree* tree = *pTree; if ((tree->gtFlags & GTF_CALL) == 0) { // Trees with ret_expr are marked as GTF_CALL. 
return Compiler::WALK_SKIP_SUBTREES; } if (tree->OperGet() == GT_RET_EXPR) { SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData); walker->StoreRetExprAsLocalVar(pTree); } return Compiler::WALK_CONTINUE; } void StoreRetExprAsLocalVar(GenTree** pRetExpr) { GenTree* retExpr = *pRetExpr; assert(retExpr->OperGet() == GT_RET_EXPR); const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr")); JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp); comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE); *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet()); if (retExpr->TypeGet() == TYP_REF) { assert(comp->lvaTable[tmp].lvSingleDef == 0); comp->lvaTable[tmp].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE retClsHnd = comp->gtGetClassHandle(retExpr, &isExact, &isNonNull); if (retClsHnd != nullptr) { comp->lvaSetClass(tmp, retClsHnd, isExact); } } } private: Compiler* comp; }; //------------------------------------------------------------------------ // addFatPointerCandidate: mark the call and the method, that they have a fat pointer candidate. // Spill ret_expr in the call node, because they can't be cloned. // // Arguments: // call - fat calli candidate // void Compiler::addFatPointerCandidate(GenTreeCall* call) { JITDUMP("Marking call [%06u] as fat pointer candidate\n", dspTreeID(call)); setMethodHasFatPointer(); call->SetFatPointerCandidate(); SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); } //------------------------------------------------------------------------ // considerGuardedDevirtualization: see if we can profitably guess at the // class involved in an interface or virtual call. // // Arguments: // // call - potential guarded devirtualization candidate // ilOffset - IL ofset of the call instruction // isInterface - true if this is an interface call // baseMethod - target method of the call // baseClass - class that introduced the target method // pContextHandle - context handle for the call // objClass - class of 'this' in the call // objClassName - name of the obj Class // // Notes: // Consults with VM to see if there's a likely class at runtime, // if so, adds a candidate for guarded devirtualization. // void Compiler::considerGuardedDevirtualization( GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)) { #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; #endif JITDUMP("Considering guarded devirtualization at IL offset %u (0x%x)\n", ilOffset, ilOffset); // We currently only get likely class guesses when there is PGO data // with class profiles. // if (fgPgoClassProfiles == 0) { JITDUMP("Not guessing for class: no class profile pgo data, or pgo disabled\n"); return; } // See if there's a likely guess for the class. // const unsigned likelihoodThreshold = isInterface ? 25 : 30; unsigned likelihood = 0; unsigned numberOfClasses = 0; CORINFO_CLASS_HANDLE likelyClass = NO_CLASS_HANDLE; bool doRandomDevirt = false; const int maxLikelyClasses = 32; LikelyClassRecord likelyClasses[maxLikelyClasses]; #ifdef DEBUG // Optional stress mode to pick a random known class, rather than // the most likely known class. 
// doRandomDevirt = JitConfig.JitRandomGuardedDevirtualization() != 0; if (doRandomDevirt) { // Reuse the random inliner's random state. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomGuardedDevirtualization()); likelyClasses[0].clsHandle = getRandomClass(fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset, random); likelyClasses[0].likelihood = 100; if (likelyClasses[0].clsHandle != NO_CLASS_HANDLE) { numberOfClasses = 1; } } else #endif { numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); } // For now we only use the most popular type likelihood = likelyClasses[0].likelihood; likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses < 1) { JITDUMP("No likely class, sorry\n"); return; } assert(likelyClass != NO_CLASS_HANDLE); // Print all likely classes JITDUMP("%s classes for %p (%s):\n", doRandomDevirt ? "Random" : "Likely", dspPtr(objClass), objClassName) for (UINT32 i = 0; i < numberOfClasses; i++) { JITDUMP(" %u) %p (%s) [likelihood:%u%%]\n", i + 1, likelyClasses[i].clsHandle, eeGetClassName(likelyClasses[i].clsHandle), likelyClasses[i].likelihood); } // Todo: a more advanced heuristic using likelihood, number of // classes, and the profile count for this block. // // For now we will guess if the likelihood is at least 25%/30% (intfc/virt), as studies // have shown this transformation should pay off even if we guess wrong sometimes. // if (likelihood < likelihoodThreshold) { JITDUMP("Not guessing for class; likelihood is below %s call threshold %u\n", callKind, likelihoodThreshold); return; } uint32_t const likelyClassAttribs = info.compCompHnd->getClassAttribs(likelyClass); if ((likelyClassAttribs & CORINFO_FLG_ABSTRACT) != 0) { // We may see an abstract likely class, if we have a stale profile. // No point guessing for this. // JITDUMP("Not guessing for class; abstract (stale profile)\n"); return; } // Figure out which method will be called. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = likelyClass; dvInfo.context = *pContextHandle; dvInfo.exactContext = *pContextHandle; dvInfo.pResolvedTokenVirtualMethod = nullptr; const bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo); if (!canResolve) { JITDUMP("Can't figure out which method would be invoked, sorry\n"); return; } CORINFO_METHOD_HANDLE likelyMethod = dvInfo.devirtualizedMethod; JITDUMP("%s call would invoke method %s\n", callKind, eeGetMethodName(likelyMethod, nullptr)); // Add this as a potential candidate. // uint32_t const likelyMethodAttribs = info.compCompHnd->getMethodAttribs(likelyMethod); addGuardedDevirtualizationCandidate(call, likelyMethod, likelyClass, likelyMethodAttribs, likelyClassAttribs, likelihood); } //------------------------------------------------------------------------ // addGuardedDevirtualizationCandidate: potentially mark the call as a guarded // devirtualization candidate // // Notes: // // Call sites in rare or unoptimized code, and calls that require cookies are // not marked as candidates. // // As part of marking the candidate, the code spills GT_RET_EXPRs anywhere in any // child tree, because and we need to clone all these trees when we clone the call // as part of guarded devirtualization, and these IR nodes can't be cloned. 
// // Arguments: // call - potential guarded devirtualization candidate // methodHandle - method that will be invoked if the class test succeeds // classHandle - class that will be tested for at runtime // methodAttr - attributes of the method // classAttr - attributes of the class // likelihood - odds that this class is the class seen at runtime // void Compiler::addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood) { // This transformation only makes sense for virtual calls assert(call->IsVirtual()); // Only mark calls if the feature is enabled. const bool isEnabled = JitConfig.JitEnableGuardedDevirtualization() > 0; if (!isEnabled) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- disabled by jit config\n", dspTreeID(call)); return; } // Bail if not optimizing or the call site is very likely cold if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n", dspTreeID(call)); return; } // CT_INDIRECT calls may use the cookie, bail if so... // // If transforming these provides a benefit, we could save this off in the same way // we save the stub address below. if ((call->gtCallType == CT_INDIRECT) && (call->AsCall()->gtCallCookie != nullptr)) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- CT_INDIRECT with cookie\n", dspTreeID(call)); return; } #ifdef DEBUG // See if disabled by range // static ConfigMethodRange JitGuardedDevirtualizationRange; JitGuardedDevirtualizationRange.EnsureInit(JitConfig.JitGuardedDevirtualizationRange()); assert(!JitGuardedDevirtualizationRange.Error()); if (!JitGuardedDevirtualizationRange.Contains(impInlineRoot()->info.compMethodHash())) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- excluded by " "JitGuardedDevirtualizationRange", dspTreeID(call)); return; } #endif // We're all set, proceed with candidate creation. // JITDUMP("Marking call [%06u] as guarded devirtualization candidate; will guess for class %s\n", dspTreeID(call), eeGetClassName(classHandle)); setMethodHasGuardedDevirtualization(); call->SetGuardedDevirtualizationCandidate(); // Spill off any GT_RET_EXPR subtrees so we can clone the call. // SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); // Gather some information for later. Note we actually allocate InlineCandidateInfo // here, as the devirtualized half of this call will likely become an inline candidate. // GuardedDevirtualizationCandidateInfo* pInfo = new (this, CMK_Inlining) InlineCandidateInfo; pInfo->guardedMethodHandle = methodHandle; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->guardedClassHandle = classHandle; pInfo->likelihood = likelihood; pInfo->requiresInstMethodTableArg = false; // If the guarded class is a value class, look for an unboxed entry point. // if ((classAttr & CORINFO_FLG_VALUECLASS) != 0) { JITDUMP(" ... class is a value class, looking for unboxed entry\n"); bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethodHandle = info.compCompHnd->getUnboxedEntry(methodHandle, &requiresInstMethodTableArg); if (unboxedEntryMethodHandle != nullptr) { JITDUMP(" ... 
updating GDV candidate with unboxed entry info\n"); pInfo->guardedMethodUnboxedEntryHandle = unboxedEntryMethodHandle; pInfo->requiresInstMethodTableArg = requiresInstMethodTableArg; } } call->gtGuardedDevirtualizationCandidateInfo = pInfo; } void Compiler::addExpRuntimeLookupCandidate(GenTreeCall* call) { setMethodHasExpRuntimeLookup(); call->SetExpRuntimeLookup(); } //------------------------------------------------------------------------ // impIsClassExact: check if a class handle can only describe values // of exactly one class. // // Arguments: // classHnd - handle for class in question // // Returns: // true if class is final and not subject to special casting from // variance or similar. // // Note: // We are conservative on arrays of primitive types here. bool Compiler::impIsClassExact(CORINFO_CLASS_HANDLE classHnd) { DWORD flags = info.compCompHnd->getClassAttribs(classHnd); DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY; if ((flags & flagsMask) == CORINFO_FLG_FINAL) { return true; } if ((flags & flagsMask) == (CORINFO_FLG_FINAL | CORINFO_FLG_ARRAY)) { CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType type = info.compCompHnd->getChildType(classHnd, &arrayElementHandle); if ((type == CORINFO_TYPE_CLASS) || (type == CORINFO_TYPE_VALUECLASS)) { return impIsClassExact(arrayElementHandle); } } return false; } //------------------------------------------------------------------------ // impCanSkipCovariantStoreCheck: see if storing a ref type value to an array // can skip the array store covariance check. // // Arguments: // value -- tree producing the value to store // array -- tree representing the array to store to // // Returns: // true if the store does not require a covariance check. // bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array) { // We should only call this when optimizing. assert(opts.OptimizationEnabled()); // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j] if (value->OperIs(GT_INDEX) && array->OperIs(GT_LCL_VAR)) { GenTree* valueIndex = value->AsIndex()->Arr(); if (valueIndex->OperIs(GT_LCL_VAR)) { unsigned valueLcl = valueIndex->AsLclVar()->GetLclNum(); unsigned arrayLcl = array->AsLclVar()->GetLclNum(); if ((valueLcl == arrayLcl) && !lvaGetDesc(arrayLcl)->IsAddressExposed()) { JITDUMP("\nstelem of ref from same array: skipping covariant store check\n"); return true; } } } // Check for assignment of NULL. if (value->OperIs(GT_CNS_INT)) { assert(value->gtType == TYP_REF); if (value->AsIntCon()->gtIconVal == 0) { JITDUMP("\nstelem of null: skipping covariant store check\n"); return true; } // Non-0 const refs can only occur with frozen objects assert(value->IsIconHandle(GTF_ICON_STR_HDL)); assert(doesMethodHaveFrozenString() || (compIsForInlining() && impInlineInfo->InlinerCompiler->doesMethodHaveFrozenString())); } // Try and get a class handle for the array if (value->gtType != TYP_REF) { return false; } bool arrayIsExact = false; bool arrayIsNonNull = false; CORINFO_CLASS_HANDLE arrayHandle = gtGetClassHandle(array, &arrayIsExact, &arrayIsNonNull); if (arrayHandle == NO_CLASS_HANDLE) { return false; } // There are some methods in corelib where we're storing to an array but the IL // doesn't reflect this (see SZArrayHelper). Avoid. 
DWORD attribs = info.compCompHnd->getClassAttribs(arrayHandle); if ((attribs & CORINFO_FLG_ARRAY) == 0) { return false; } CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayHandle, &arrayElementHandle); // Verify array type handle is really an array of ref type assert(arrayElemType == CORINFO_TYPE_CLASS); // Check for exactly object[] if (arrayIsExact && (arrayElementHandle == impGetObjectClass())) { JITDUMP("\nstelem to (exact) object[]: skipping covariant store check\n"); return true; } const bool arrayTypeIsSealed = impIsClassExact(arrayElementHandle); if ((!arrayIsExact && !arrayTypeIsSealed) || (arrayElementHandle == NO_CLASS_HANDLE)) { // Bail out if we don't know array's exact type return false; } bool valueIsExact = false; bool valueIsNonNull = false; CORINFO_CLASS_HANDLE valueHandle = gtGetClassHandle(value, &valueIsExact, &valueIsNonNull); // Array's type is sealed and equals to value's type if (arrayTypeIsSealed && (valueHandle == arrayElementHandle)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } // Array's type is not sealed but we know its exact type if (arrayIsExact && (valueHandle != NO_CLASS_HANDLE) && (info.compCompHnd->compareTypesForCast(valueHandle, arrayElementHandle) == TypeCompareState::Must)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } return false; }
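The devirtualization and guarded-devirtualization logic above only marks candidates; the guard itself is expanded in a later phase. As a minimal, self-contained sketch of the shape that transformation aims for (illustrative only: Animal, Dog, and SpeakGuarded are placeholder names, and the typeid comparison stands in for the method-table test the JIT actually emits):

#include <typeinfo>

struct Animal
{
    virtual int Speak() { return 0; }
    virtual ~Animal() = default;
};

struct Dog final : Animal
{
    int Speak() override { return 1; }
};

int SpeakGuarded(Animal* a)
{
    // Guess that 'a' is most likely a Dog (the guess a class profile would supply).
    if (typeid(*a) == typeid(Dog))
    {
        // Devirtualized path: a direct, inlineable call.
        return static_cast<Dog*>(a)->Dog::Speak();
    }

    // Fallback path: the original virtual dispatch.
    return a->Speak();
}

int main()
{
    Dog d;
    return SpeakGuarded(&d);
}

The likelihood thresholds checked in considerGuardedDevirtualization (25% for interface calls, 30% for virtual calls) gate whether emitting such a guard is expected to pay off.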
1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/native/external/zlib-intel/inffixed.h
/* inffixed.h -- table for decoding fixed codes * Generated automatically by makefixed(). */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of this library and is subject to change. Applications should only use zlib.h. */ static const code lenfix[512] = { {96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48}, {0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128}, {0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59}, {0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176}, {0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20}, {21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100}, {0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8}, {0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216}, {18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76}, {0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114}, {0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2}, {0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148}, {20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42}, {0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86}, {0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15}, {0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236}, {16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62}, {0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142}, {0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31}, {0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162}, {0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25}, {0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105}, {0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4}, {0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202}, {17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69}, {0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125}, {0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13}, {0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195}, {19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35}, {0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91}, {0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19}, {0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246}, {16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55}, {0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135}, {0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99}, {0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190}, {0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16}, {20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96}, {0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6}, {0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209}, {17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72}, {0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116}, {0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4}, {0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153}, {20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44}, {0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82}, {0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11}, {0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229}, 
{16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58}, {0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138}, {0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51}, {0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173}, {0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30}, {0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110}, {0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0}, {0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195}, {16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65}, {0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121}, {0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9}, {0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258}, {19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37}, {0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93}, {0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23}, {0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251}, {16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51}, {0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131}, {0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67}, {0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183}, {0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23}, {64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103}, {0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9}, {0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223}, {18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79}, {0,9,255} }; static const code distfix[32] = { {16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025}, {21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193}, {18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385}, {19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577}, {16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073}, {22,5,193},{64,5,0} };
/* inffixed.h -- table for decoding fixed codes * Generated automatically by makefixed(). */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of this library and is subject to change. Applications should only use zlib.h. */ static const code lenfix[512] = { {96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48}, {0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128}, {0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59}, {0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176}, {0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20}, {21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100}, {0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8}, {0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216}, {18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76}, {0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114}, {0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2}, {0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148}, {20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42}, {0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86}, {0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15}, {0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236}, {16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62}, {0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142}, {0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31}, {0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162}, {0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25}, {0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105}, {0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4}, {0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202}, {17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69}, {0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125}, {0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13}, {0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195}, {19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35}, {0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91}, {0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19}, {0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246}, {16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55}, {0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135}, {0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99}, {0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190}, {0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16}, {20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96}, {0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6}, {0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209}, {17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72}, {0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116}, {0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4}, {0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153}, {20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44}, {0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82}, {0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11}, {0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229}, 
{16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58}, {0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138}, {0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51}, {0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173}, {0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30}, {0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110}, {0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0}, {0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195}, {16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65}, {0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121}, {0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9}, {0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258}, {19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37}, {0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93}, {0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23}, {0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251}, {16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51}, {0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131}, {0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67}, {0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183}, {0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23}, {64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103}, {0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9}, {0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223}, {18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79}, {0,9,255} }; static const code distfix[32] = { {16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025}, {21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193}, {18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385}, {19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577}, {16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073}, {22,5,193},{64,5,0} };
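Each initializer triple in lenfix and distfix above is one `code` entry. For readability, the struct these tables instantiate is declared in zlib's inftrees.h roughly as follows (reproduced here only as a reading aid; the header remains the authoritative definition):

typedef struct {
    unsigned char op;     /* operation, extra bits, table bits */
    unsigned char bits;   /* bits consumed by this part of the code */
    unsigned short val;   /* offset in table or code value */
} code;

For example, an entry such as {0,8,80} is a literal (op 0) decoded from an 8-bit code whose value is the byte 80.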
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/pal/tests/palsuite/miscellaneous/InterlockedCompareExchangePointer/test1/test.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: test.c ** ** Purpose: Test for InterlockedCompareExchangePointer() function ** ** **=========================================================*/ /* This test is FINISHED. Note: The biggest feature of this function is that it locks the value before it increments it -- in order to make it so only one thread can access it. But, I really don't have a great test to make sure it's thread safe. Any ideas? */ #include <palsuite.h> PALTEST(miscellaneous_InterlockedCompareExchangePointer_test1_paltest_interlockedcompareexchangepointer_test1, "miscellaneous/InterlockedCompareExchangePointer/test1/paltest_interlockedcompareexchangepointer_test1") { long StartValue = 5; long NewValue = 10; PVOID ReturnValue = NULL; /* * Initialize the PAL and return FAILURE if this fails */ if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } ReturnValue = InterlockedCompareExchangePointer((PVOID)&StartValue, (PVOID)NewValue, (PVOID)StartValue); /* StartValue and NewValue should be equal now */ if(StartValue != NewValue) { Fail("ERROR: These values should be equal after the exchange. " "They should both be %d, however the value that should have " "been exchanged is %d.\n",NewValue,StartValue); } /* Returnvalue should have been set to what 'StartValue' was (5 in this case) */ if((int)(size_t)ReturnValue != 5) { Fail("ERROR: The return value should be the value of the " "variable before the exchange took place, which was 5. " "But, the return value was %d.\n",ReturnValue); } /* This is a mismatch, so no exchange should happen */ InterlockedCompareExchangePointer((PVOID)&StartValue, ReturnValue, ReturnValue); if(StartValue != NewValue) { Fail("ERROR: The compare should have failed and no exchange should " "have been made, but it seems the exchange still happened.\n"); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: test.c ** ** Purpose: Test for InterlockedCompareExchangePointer() function ** ** **=========================================================*/ /* This test is FINISHED. Note: The biggest feature of this function is that it locks the value before it increments it -- in order to make it so only one thread can access it. But, I really don't have a great test to make sure it's thread safe. Any ideas? */ #include <palsuite.h> PALTEST(miscellaneous_InterlockedCompareExchangePointer_test1_paltest_interlockedcompareexchangepointer_test1, "miscellaneous/InterlockedCompareExchangePointer/test1/paltest_interlockedcompareexchangepointer_test1") { long StartValue = 5; long NewValue = 10; PVOID ReturnValue = NULL; /* * Initialize the PAL and return FAILURE if this fails */ if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } ReturnValue = InterlockedCompareExchangePointer((PVOID)&StartValue, (PVOID)NewValue, (PVOID)StartValue); /* StartValue and NewValue should be equal now */ if(StartValue != NewValue) { Fail("ERROR: These values should be equal after the exchange. " "They should both be %d, however the value that should have " "been exchanged is %d.\n",NewValue,StartValue); } /* Returnvalue should have been set to what 'StartValue' was (5 in this case) */ if((int)(size_t)ReturnValue != 5) { Fail("ERROR: The return value should be the value of the " "variable before the exchange took place, which was 5. " "But, the return value was %d.\n",ReturnValue); } /* This is a mismatch, so no exchange should happen */ InterlockedCompareExchangePointer((PVOID)&StartValue, ReturnValue, ReturnValue); if(StartValue != NewValue) { Fail("ERROR: The compare should have failed and no exchange should " "have been made, but it seems the exchange still happened.\n"); } PAL_Terminate(); return PASS; }
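The test above, as its own comments note, only exercises single-threaded success and failure cases. For context, a typical lock-free use of the primitive is the compare-and-swap retry loop sketched below (illustrative only, not part of the test; Node and PushNode are made-up names, and the palsuite environment is assumed to provide PVOID and the interlocked API):

typedef struct Node
{
    struct Node* next;
} Node;

static void PushNode(PVOID volatile* head, Node* node)
{
    PVOID observed;
    do
    {
        /* Snapshot the current head and link the new node in front of it. */
        observed = *head;
        node->next = (Node*)observed;
    }
    /* Install 'node' only if the head still holds the value we observed; retry otherwise. */
    while (InterlockedCompareExchangePointer(head, node, observed) != observed);
}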
-1
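The record above exercises InterlockedCompareExchangePointer, which atomically compares a pointer-sized destination against a comparand, stores the exchange value only when they match, and returns the destination's original value. As a rough illustration of how the API is normally used outside a single-threaded PAL test — this sketch is editorial, not part of the record, and the Node/Push names are hypothetical — a lock-free push retries the compare-and-swap until the head pointer has not changed between the read and the exchange:

#include <windows.h>

typedef struct Node
{
    struct Node *next;
    int          value;
} Node;

// Minimal sketch, assuming a Win32/PAL environment: push 'node' onto a singly
// linked list whose head is updated atomically.  The CAS only succeeds while
// *head still equals the value we read, so a concurrent push simply forces
// another trip around the loop.
void Push(Node *volatile *head, Node *node)
{
    PVOID oldHead;
    do
    {
        oldHead = *head;
        node->next = (Node *)oldHead;
    }
    while (InterlockedCompareExchangePointer((PVOID volatile *)head, node, oldHead) != oldHead);
}

(The usual ABA caveat applies to this pattern; the PAL test above only verifies the single-threaded compare/exchange/return-value semantics.)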
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/md/compiler/classfactory.h
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// ClassFactory.h
//
//
// Class factories are used by the plumbing in COM to activate new objects.
// This module contains the class factory code to instantiate the debugger
// objects described in <cordb.h>.
//
//*****************************************************************************
#ifndef __ClassFactory__h__
#define __ClassFactory__h__

#include "disp.h"

// This typedef is for a function which will create a new instance of an object.
typedef HRESULT (* PFN_CREATE_OBJ)(REFIID riid, void **ppvObject);

//*****************************************************************************
// This structure is used to declare a global list of coclasses.  The class
// factory object is created with a pointer to the correct one of these, so
// that when create instance is called, it can be created.
//*****************************************************************************
struct COCLASS_REGISTER
{
    const GUID      *pClsid;            // Class ID of the coclass.
    LPCWSTR         szProgID;           // Prog ID of the class.
    PFN_CREATE_OBJ  pfnCreateObject;    // Creation function for an instance.
};

//*****************************************************************************
// One class factory object satisfies all of our clsid's, to reduce overall
// code bloat.
//*****************************************************************************
class MDClassFactory : public IClassFactory
{
    MDClassFactory() { }                // Can't use without data.

public:
    MDClassFactory(const COCLASS_REGISTER *pCoClass)
        : m_cRef(1), m_pCoClass(pCoClass)
    { }

    virtual ~MDClassFactory() {}

    //
    // IUnknown methods.
    //

    virtual HRESULT STDMETHODCALLTYPE QueryInterface(
        REFIID      riid,
        void        **ppvObject);

    virtual ULONG STDMETHODCALLTYPE AddRef()
    {
        return InterlockedIncrement(&m_cRef);
    }

    virtual ULONG STDMETHODCALLTYPE Release()
    {
        LONG cRef = InterlockedDecrement(&m_cRef);
        if (cRef <= 0)
            delete this;
        return (cRef);
    }

    //
    // IClassFactory methods.
    //

    virtual HRESULT STDMETHODCALLTYPE CreateInstance(
        IUnknown    *pUnkOuter,
        REFIID      riid,
        void        **ppvObject);

    virtual HRESULT STDMETHODCALLTYPE LockServer(
        BOOL        fLock);

private:
    LONG        m_cRef;                     // Reference count.
    const COCLASS_REGISTER *m_pCoClass;     // The class we belong to.
};

#endif // __ClassFactory__h__
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// ClassFactory.h
//
//
// Class factories are used by the plumbing in COM to activate new objects.
// This module contains the class factory code to instantiate the debugger
// objects described in <cordb.h>.
//
//*****************************************************************************
#ifndef __ClassFactory__h__
#define __ClassFactory__h__

#include "disp.h"

// This typedef is for a function which will create a new instance of an object.
typedef HRESULT (* PFN_CREATE_OBJ)(REFIID riid, void **ppvObject);

//*****************************************************************************
// This structure is used to declare a global list of coclasses.  The class
// factory object is created with a pointer to the correct one of these, so
// that when create instance is called, it can be created.
//*****************************************************************************
struct COCLASS_REGISTER
{
    const GUID      *pClsid;            // Class ID of the coclass.
    LPCWSTR         szProgID;           // Prog ID of the class.
    PFN_CREATE_OBJ  pfnCreateObject;    // Creation function for an instance.
};

//*****************************************************************************
// One class factory object satisfies all of our clsid's, to reduce overall
// code bloat.
//*****************************************************************************
class MDClassFactory : public IClassFactory
{
    MDClassFactory() { }                // Can't use without data.

public:
    MDClassFactory(const COCLASS_REGISTER *pCoClass)
        : m_cRef(1), m_pCoClass(pCoClass)
    { }

    virtual ~MDClassFactory() {}

    //
    // IUnknown methods.
    //

    virtual HRESULT STDMETHODCALLTYPE QueryInterface(
        REFIID      riid,
        void        **ppvObject);

    virtual ULONG STDMETHODCALLTYPE AddRef()
    {
        return InterlockedIncrement(&m_cRef);
    }

    virtual ULONG STDMETHODCALLTYPE Release()
    {
        LONG cRef = InterlockedDecrement(&m_cRef);
        if (cRef <= 0)
            delete this;
        return (cRef);
    }

    //
    // IClassFactory methods.
    //

    virtual HRESULT STDMETHODCALLTYPE CreateInstance(
        IUnknown    *pUnkOuter,
        REFIID      riid,
        void        **ppvObject);

    virtual HRESULT STDMETHODCALLTYPE LockServer(
        BOOL        fLock);

private:
    LONG        m_cRef;                     // Reference count.
    const COCLASS_REGISTER *m_pCoClass;     // The class we belong to.
};

#endif // __ClassFactory__h__
-1
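The header in the record above only declares MDClassFactory::CreateInstance; the implementation lives elsewhere (classfactory.cpp, not part of this record). As a hedged sketch of how a factory built over a COCLASS_REGISTER entry is typically wired up — the aggregation check and error codes here are illustrative assumptions, not the actual metadata code — CreateInstance simply forwards to the creation function registered for the coclass:

// Sketch only: assumes classfactory.h above has been included.
HRESULT STDMETHODCALLTYPE MDClassFactory::CreateInstance(
    IUnknown    *pUnkOuter,
    REFIID      riid,
    void        **ppvObject)
{
    *ppvObject = NULL;

    // COM aggregation is rejected in this sketch, as many factories do.
    if (pUnkOuter != NULL)
        return CLASS_E_NOAGGREGATION;

    // Delegate to the creation function registered in the COCLASS_REGISTER
    // entry this factory was constructed with.
    return m_pCoClass->pfnCreateObject(riid, ppvObject);
}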
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/debug/dbgutil/machoreader.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <windows.h> #include <clrdata.h> #include <cor.h> #include <cordebug.h> #define __STDC_FORMAT_MACROS #include <inttypes.h> #include <arrayholder.h> #include "machoreader.h" #if TARGET_64BIT #define PRIx PRIx64 #define PRIu PRIu64 #define PRId PRId64 #define PRIA "016" #define PRIxA PRIA PRIx #else #define PRIx PRIx32 #define PRIu PRIu32 #define PRId PRId32 #define PRIA "08" #define PRIxA PRIA PRIx #endif class MachOReaderExport : public MachOReader { private: ICorDebugDataTarget* m_dataTarget; public: MachOReaderExport(ICorDebugDataTarget* dataTarget) : m_dataTarget(dataTarget) { dataTarget->AddRef(); } virtual ~MachOReaderExport() { m_dataTarget->Release(); } private: virtual bool ReadMemory(void* address, void* buffer, size_t size) { uint32_t read = 0; return SUCCEEDED(m_dataTarget->ReadVirtual(reinterpret_cast<CLRDATA_ADDRESS>(address), reinterpret_cast<PBYTE>(buffer), (uint32_t)size, &read)); } }; // // Main entry point to get an export symbol // extern "C" bool TryGetSymbol(ICorDebugDataTarget* dataTarget, uint64_t baseAddress, const char* symbolName, uint64_t* symbolAddress) { MachOReaderExport reader(dataTarget); MachOModule module(reader, baseAddress); if (!module.ReadHeader()) { return false; } uint64_t symbolOffset; if (module.TryLookupSymbol(symbolName, &symbolOffset)) { *symbolAddress = symbolOffset; return true; } *symbolAddress = 0; return false; } //-------------------------------------------------------------------- // MachO module //-------------------------------------------------------------------- MachOModule::MachOModule(MachOReader& reader, mach_vm_address_t baseAddress, mach_header_64* header, std::string* name) : m_reader(reader), m_baseAddress(baseAddress), m_loadBias(0), m_commands(nullptr), m_symtabCommand(nullptr), m_nlists(nullptr), m_strtabAddress(0) { if (header != nullptr) { m_header = *header; } if (name != nullptr) { m_name = *name; } } MachOModule::~MachOModule() { if (m_commands != nullptr) { free(m_commands); m_commands = nullptr; } if (m_nlists != nullptr) { free(m_nlists); m_nlists = nullptr; } } bool MachOModule::ReadHeader() { _ASSERTE(sizeof(m_header) == sizeof(mach_header_64)); if (!m_reader.ReadMemory((void*)m_baseAddress, &m_header, sizeof(mach_header_64))) { m_reader.Trace("ERROR: failed to read header at %p\n", (void*)m_baseAddress); return false; } return true; } bool MachOModule::TryLookupSymbol(const char* symbolName, uint64_t* symbolValue) { _ASSERTE(symbolValue != nullptr); if (ReadSymbolTable()) { _ASSERTE(m_nlists != nullptr); _ASSERTE(m_strtabAddress != 0); // First, search just the "external" export symbols if (TryLookupSymbol(m_dysymtabCommand->iextdefsym, m_dysymtabCommand->nextdefsym, symbolName, symbolValue)) { m_reader.Trace("SYM: Found '%s' in external symbols\n", symbolName); return true; } m_reader.Trace("SYM: Missed '%s' in external symbols\n", symbolName); // If not found in external symbols, search all of them if (TryLookupSymbol(0, m_symtabCommand->nsyms, symbolName, symbolValue)) { m_reader.Trace("SYM: Found '%s' in all symbols\n", symbolName); return true; } m_reader.Trace("SYM: Missed '%s' in all symbols\n", symbolName); } *symbolValue = 0; return false; } bool MachOModule::TryLookupSymbol(int start, int nsyms, const char* symbolName, uint64_t* symbolValue) { for (int i = 0; i < nsyms; i++) { std::string name = GetSymbolName(start + i); // Skip the leading underscores to match 
Linux externs const char* currentName = name.length() > 0 && name[0] == '_' ? name.c_str() + 1 : name.c_str(); // Does this symbol match? if (strcmp(currentName, symbolName) == 0) { *symbolValue = m_loadBias + m_nlists[start + i].n_value; return true; } } *symbolValue = 0; return false; } bool MachOModule::EnumerateSegments() { if (!ReadLoadCommands()) { return false; } _ASSERTE(!m_segments.empty()); for (const segment_command_64* segment : m_segments) { m_reader.VisitSegment(*this, *segment); const section_64* section = (section_64*)((uint64_t)segment + sizeof(segment_command_64)); for (int s = 0; s < segment->nsects; s++, section++) { m_reader.VisitSection(*this, *section); } } return true; } bool MachOModule::ReadLoadCommands() { if (m_commands == nullptr) { // Read load commands void* commandsAddress = (void*)(m_baseAddress + sizeof(mach_header_64)); m_commands = (load_command*)malloc(m_header.sizeofcmds); if (m_commands == nullptr) { m_reader.Trace("ERROR: Failed to allocate %d byte load commands\n", m_header.sizeofcmds); return false; } if (!m_reader.ReadMemory(commandsAddress, m_commands, m_header.sizeofcmds)) { m_reader.Trace("ERROR: Failed to read load commands at %p of %d\n", commandsAddress, m_header.sizeofcmds); return false; } load_command* command = m_commands; for (int i = 0; i < m_header.ncmds; i++) { m_reader.TraceVerbose("CMD: load command cmd %02x (%d) size %d\n", command->cmd, command->cmd, command->cmdsize); switch (command->cmd) { case LC_SYMTAB: m_symtabCommand = (symtab_command*)command; break; case LC_DYSYMTAB: m_dysymtabCommand = (dysymtab_command*)command; break; case LC_SEGMENT_64: segment_command_64* segment = (segment_command_64*)command; m_segments.push_back(segment); // Calculate the load bias for the module. This is the value to add to the vmaddr of a // segment to get the actual address. if (strcmp(segment->segname, SEG_TEXT) == 0) { m_loadBias = m_baseAddress - segment->vmaddr; } m_reader.TraceVerbose("CMD: vmaddr %016llx vmsize %016llx fileoff %016llx filesize %016llx nsects %d max %c%c%c init %c%c%c %02x %s\n", segment->vmaddr, segment->vmsize, segment->fileoff, segment->filesize, segment->nsects, (segment->maxprot & VM_PROT_READ) ? 'r' : '-', (segment->maxprot & VM_PROT_WRITE) ? 'w' : '-', (segment->maxprot & VM_PROT_EXECUTE) ? 'x' : '-', (segment->initprot & VM_PROT_READ) ? 'r' : '-', (segment->initprot & VM_PROT_WRITE) ? 'w' : '-', (segment->initprot & VM_PROT_EXECUTE) ? 
'x' : '-', segment->flags, segment->segname); section_64* section = (section_64*)((uint64_t)segment + sizeof(segment_command_64)); for (int s = 0; s < segment->nsects; s++, section++) { m_reader.TraceVerbose(" addr %016llx size %016llx off %08x align %02x flags %02x %s\n", section->addr, section->size, section->offset, section->align, section->flags, section->sectname); } break; } // Get next load command command = (load_command*)((char*)command + command->cmdsize); } m_reader.TraceVerbose("CMD: load bias %016llx\n", m_loadBias); } return true; } bool MachOModule::ReadSymbolTable() { if (m_nlists == nullptr) { if (!ReadLoadCommands()) { return false; } _ASSERTE(m_symtabCommand != nullptr); _ASSERTE(m_strtabAddress == 0); m_reader.TraceVerbose("SYM: symoff %08x nsyms %d stroff %08x strsize %d iext %d next %d iundef %d nundef %d extref %d nextref %d\n", m_symtabCommand->symoff, m_symtabCommand->nsyms, m_symtabCommand->stroff, m_symtabCommand->strsize, m_dysymtabCommand->iextdefsym, m_dysymtabCommand->nextdefsym, m_dysymtabCommand->iundefsym, m_dysymtabCommand->nundefsym, m_dysymtabCommand->extrefsymoff, m_dysymtabCommand->nextrefsyms); // Read the entire symbol part of symbol table. An array of "nlist" structs. void* symbolTableAddress = (void*)GetAddressFromFileOffset(m_symtabCommand->symoff); size_t symtabSize = sizeof(nlist_64) * m_symtabCommand->nsyms; m_nlists = (nlist_64*)malloc(symtabSize); if (m_nlists == nullptr) { m_reader.Trace("ERROR: Failed to allocate %zu byte symtab\n", symtabSize); return false; } if (!m_reader.ReadMemory(symbolTableAddress, m_nlists, symtabSize)) { m_reader.Trace("ERROR: Failed to read symtab at %p of %zu\n", symbolTableAddress, symtabSize); return false; } // Save the symbol string table address. m_strtabAddress = GetAddressFromFileOffset(m_symtabCommand->stroff); } return true; } uint64_t MachOModule::GetAddressFromFileOffset(uint32_t offset) { _ASSERTE(!m_segments.empty()); for (const segment_command_64* segment : m_segments) { if (offset >= segment->fileoff && offset < (segment->fileoff + segment->filesize)) { return m_loadBias + offset + segment->vmaddr - segment->fileoff; } } return m_loadBias + offset; } std::string MachOModule::GetSymbolName(int index) { uint64_t symbolNameAddress = m_strtabAddress + m_nlists[index].n_un.n_strx; std::string result; while (true) { char c = 0; if (!m_reader.ReadMemory((void*)symbolNameAddress, &c, sizeof(char))) { m_reader.Trace("ERROR: Failed to read string table at %p\n", (void*)symbolNameAddress); break; } if (c == '\0') { break; } result.append(1, c); symbolNameAddress++; } return result; } //-------------------------------------------------------------------- // MachO reader //-------------------------------------------------------------------- MachOReader::MachOReader() { } bool MachOReader::EnumerateModules(mach_vm_address_t address, mach_header_64* header) { _ASSERTE(header->magic == MH_MAGIC_64); _ASSERTE(header->filetype == MH_DYLINKER); MachOModule dylinker(*this, address, header); // Search for symbol for the dyld image info cache uint64_t dyldInfoAddress = 0; if (!dylinker.TryLookupSymbol("dyld_all_image_infos", &dyldInfoAddress)) { Trace("ERROR: Can not find the _dyld_all_image_infos symbol\n"); return false; } // Read the all image info from the dylinker image dyld_all_image_infos dyldInfo; if (!ReadMemory((void*)dyldInfoAddress, &dyldInfo, sizeof(dyld_all_image_infos))) { Trace("ERROR: Failed to read dyld_all_image_infos at %p\n", (void*)dyldInfoAddress); return false; } std::string dylinkerPath; if 
(!ReadString(dyldInfo.dyldPath, dylinkerPath)) { Trace("ERROR: Failed to read name at %p\n", dyldInfo.dyldPath); return false; } dylinker.SetName(dylinkerPath); Trace("MOD: %016llx %08x %s\n", dylinker.BaseAddress(), dylinker.Header().flags, dylinker.Name().c_str()); VisitModule(dylinker); void* imageInfosAddress = (void*)dyldInfo.infoArray; size_t imageInfosSize = dyldInfo.infoArrayCount * sizeof(dyld_image_info); Trace("MOD: infoArray %p infoArrayCount %d\n", dyldInfo.infoArray, dyldInfo.infoArrayCount); ArrayHolder<dyld_image_info> imageInfos = new (std::nothrow) dyld_image_info[dyldInfo.infoArrayCount]; if (imageInfos == nullptr) { Trace("ERROR: Failed to allocate %zu byte image infos\n", imageInfosSize); return false; } if (!ReadMemory(imageInfosAddress, imageInfos, imageInfosSize)) { Trace("ERROR: Failed to read dyld_all_image_infos at %p\n", imageInfosAddress); return false; } for (int i = 0; i < dyldInfo.infoArrayCount; i++) { mach_vm_address_t imageAddress = (mach_vm_address_t)imageInfos[i].imageLoadAddress; const char* imageFilePathAddress = imageInfos[i].imageFilePath; std::string imagePath; if (!ReadString(imageFilePathAddress, imagePath)) { Trace("ERROR: Failed to read image name at %p\n", imageFilePathAddress); continue; } MachOModule module(*this, imageAddress, nullptr, &imagePath); if (!module.ReadHeader()) { continue; } Trace("MOD: %016llx %08x %s\n", imageAddress, module.Header().flags, imagePath.c_str()); VisitModule(module); } return true; } bool MachOReader::ReadString(const char* address, std::string& str) { for (int i = 0; i < MAX_LONGPATH; i++) { char c = 0; if (!ReadMemory((void*)(address + i), &c, sizeof(char))) { Trace("ERROR: Failed to read string at %p\n", (void*)(address + i)); return false; } if (c == '\0') { break; } str.append(1, c); } return true; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <windows.h> #include <clrdata.h> #include <cor.h> #include <cordebug.h> #define __STDC_FORMAT_MACROS #include <inttypes.h> #include <arrayholder.h> #include "machoreader.h" #if TARGET_64BIT #define PRIx PRIx64 #define PRIu PRIu64 #define PRId PRId64 #define PRIA "016" #define PRIxA PRIA PRIx #else #define PRIx PRIx32 #define PRIu PRIu32 #define PRId PRId32 #define PRIA "08" #define PRIxA PRIA PRIx #endif class MachOReaderExport : public MachOReader { private: ICorDebugDataTarget* m_dataTarget; public: MachOReaderExport(ICorDebugDataTarget* dataTarget) : m_dataTarget(dataTarget) { dataTarget->AddRef(); } virtual ~MachOReaderExport() { m_dataTarget->Release(); } private: virtual bool ReadMemory(void* address, void* buffer, size_t size) { uint32_t read = 0; return SUCCEEDED(m_dataTarget->ReadVirtual(reinterpret_cast<CLRDATA_ADDRESS>(address), reinterpret_cast<PBYTE>(buffer), (uint32_t)size, &read)); } }; // // Main entry point to get an export symbol // extern "C" bool TryGetSymbol(ICorDebugDataTarget* dataTarget, uint64_t baseAddress, const char* symbolName, uint64_t* symbolAddress) { MachOReaderExport reader(dataTarget); MachOModule module(reader, baseAddress); if (!module.ReadHeader()) { return false; } uint64_t symbolOffset; if (module.TryLookupSymbol(symbolName, &symbolOffset)) { *symbolAddress = symbolOffset; return true; } *symbolAddress = 0; return false; } //-------------------------------------------------------------------- // MachO module //-------------------------------------------------------------------- MachOModule::MachOModule(MachOReader& reader, mach_vm_address_t baseAddress, mach_header_64* header, std::string* name) : m_reader(reader), m_baseAddress(baseAddress), m_loadBias(0), m_commands(nullptr), m_symtabCommand(nullptr), m_nlists(nullptr), m_strtabAddress(0) { if (header != nullptr) { m_header = *header; } if (name != nullptr) { m_name = *name; } } MachOModule::~MachOModule() { if (m_commands != nullptr) { free(m_commands); m_commands = nullptr; } if (m_nlists != nullptr) { free(m_nlists); m_nlists = nullptr; } } bool MachOModule::ReadHeader() { _ASSERTE(sizeof(m_header) == sizeof(mach_header_64)); if (!m_reader.ReadMemory((void*)m_baseAddress, &m_header, sizeof(mach_header_64))) { m_reader.Trace("ERROR: failed to read header at %p\n", (void*)m_baseAddress); return false; } return true; } bool MachOModule::TryLookupSymbol(const char* symbolName, uint64_t* symbolValue) { _ASSERTE(symbolValue != nullptr); if (ReadSymbolTable()) { _ASSERTE(m_nlists != nullptr); _ASSERTE(m_strtabAddress != 0); // First, search just the "external" export symbols if (TryLookupSymbol(m_dysymtabCommand->iextdefsym, m_dysymtabCommand->nextdefsym, symbolName, symbolValue)) { m_reader.Trace("SYM: Found '%s' in external symbols\n", symbolName); return true; } m_reader.Trace("SYM: Missed '%s' in external symbols\n", symbolName); // If not found in external symbols, search all of them if (TryLookupSymbol(0, m_symtabCommand->nsyms, symbolName, symbolValue)) { m_reader.Trace("SYM: Found '%s' in all symbols\n", symbolName); return true; } m_reader.Trace("SYM: Missed '%s' in all symbols\n", symbolName); } *symbolValue = 0; return false; } bool MachOModule::TryLookupSymbol(int start, int nsyms, const char* symbolName, uint64_t* symbolValue) { for (int i = 0; i < nsyms; i++) { std::string name = GetSymbolName(start + i); // Skip the leading underscores to match 
Linux externs const char* currentName = name.length() > 0 && name[0] == '_' ? name.c_str() + 1 : name.c_str(); // Does this symbol match? if (strcmp(currentName, symbolName) == 0) { *symbolValue = m_loadBias + m_nlists[start + i].n_value; return true; } } *symbolValue = 0; return false; } bool MachOModule::EnumerateSegments() { if (!ReadLoadCommands()) { return false; } _ASSERTE(!m_segments.empty()); for (const segment_command_64* segment : m_segments) { m_reader.VisitSegment(*this, *segment); const section_64* section = (section_64*)((uint64_t)segment + sizeof(segment_command_64)); for (int s = 0; s < segment->nsects; s++, section++) { m_reader.VisitSection(*this, *section); } } return true; } bool MachOModule::ReadLoadCommands() { if (m_commands == nullptr) { // Read load commands void* commandsAddress = (void*)(m_baseAddress + sizeof(mach_header_64)); m_commands = (load_command*)malloc(m_header.sizeofcmds); if (m_commands == nullptr) { m_reader.Trace("ERROR: Failed to allocate %d byte load commands\n", m_header.sizeofcmds); return false; } if (!m_reader.ReadMemory(commandsAddress, m_commands, m_header.sizeofcmds)) { m_reader.Trace("ERROR: Failed to read load commands at %p of %d\n", commandsAddress, m_header.sizeofcmds); return false; } load_command* command = m_commands; for (int i = 0; i < m_header.ncmds; i++) { m_reader.TraceVerbose("CMD: load command cmd %02x (%d) size %d\n", command->cmd, command->cmd, command->cmdsize); switch (command->cmd) { case LC_SYMTAB: m_symtabCommand = (symtab_command*)command; break; case LC_DYSYMTAB: m_dysymtabCommand = (dysymtab_command*)command; break; case LC_SEGMENT_64: segment_command_64* segment = (segment_command_64*)command; m_segments.push_back(segment); // Calculate the load bias for the module. This is the value to add to the vmaddr of a // segment to get the actual address. if (strcmp(segment->segname, SEG_TEXT) == 0) { m_loadBias = m_baseAddress - segment->vmaddr; } m_reader.TraceVerbose("CMD: vmaddr %016llx vmsize %016llx fileoff %016llx filesize %016llx nsects %d max %c%c%c init %c%c%c %02x %s\n", segment->vmaddr, segment->vmsize, segment->fileoff, segment->filesize, segment->nsects, (segment->maxprot & VM_PROT_READ) ? 'r' : '-', (segment->maxprot & VM_PROT_WRITE) ? 'w' : '-', (segment->maxprot & VM_PROT_EXECUTE) ? 'x' : '-', (segment->initprot & VM_PROT_READ) ? 'r' : '-', (segment->initprot & VM_PROT_WRITE) ? 'w' : '-', (segment->initprot & VM_PROT_EXECUTE) ? 
'x' : '-', segment->flags, segment->segname); section_64* section = (section_64*)((uint64_t)segment + sizeof(segment_command_64)); for (int s = 0; s < segment->nsects; s++, section++) { m_reader.TraceVerbose(" addr %016llx size %016llx off %08x align %02x flags %02x %s\n", section->addr, section->size, section->offset, section->align, section->flags, section->sectname); } break; } // Get next load command command = (load_command*)((char*)command + command->cmdsize); } m_reader.TraceVerbose("CMD: load bias %016llx\n", m_loadBias); } return true; } bool MachOModule::ReadSymbolTable() { if (m_nlists == nullptr) { if (!ReadLoadCommands()) { return false; } _ASSERTE(m_symtabCommand != nullptr); _ASSERTE(m_strtabAddress == 0); m_reader.TraceVerbose("SYM: symoff %08x nsyms %d stroff %08x strsize %d iext %d next %d iundef %d nundef %d extref %d nextref %d\n", m_symtabCommand->symoff, m_symtabCommand->nsyms, m_symtabCommand->stroff, m_symtabCommand->strsize, m_dysymtabCommand->iextdefsym, m_dysymtabCommand->nextdefsym, m_dysymtabCommand->iundefsym, m_dysymtabCommand->nundefsym, m_dysymtabCommand->extrefsymoff, m_dysymtabCommand->nextrefsyms); // Read the entire symbol part of symbol table. An array of "nlist" structs. void* symbolTableAddress = (void*)GetAddressFromFileOffset(m_symtabCommand->symoff); size_t symtabSize = sizeof(nlist_64) * m_symtabCommand->nsyms; m_nlists = (nlist_64*)malloc(symtabSize); if (m_nlists == nullptr) { m_reader.Trace("ERROR: Failed to allocate %zu byte symtab\n", symtabSize); return false; } if (!m_reader.ReadMemory(symbolTableAddress, m_nlists, symtabSize)) { m_reader.Trace("ERROR: Failed to read symtab at %p of %zu\n", symbolTableAddress, symtabSize); return false; } // Save the symbol string table address. m_strtabAddress = GetAddressFromFileOffset(m_symtabCommand->stroff); } return true; } uint64_t MachOModule::GetAddressFromFileOffset(uint32_t offset) { _ASSERTE(!m_segments.empty()); for (const segment_command_64* segment : m_segments) { if (offset >= segment->fileoff && offset < (segment->fileoff + segment->filesize)) { return m_loadBias + offset + segment->vmaddr - segment->fileoff; } } return m_loadBias + offset; } std::string MachOModule::GetSymbolName(int index) { uint64_t symbolNameAddress = m_strtabAddress + m_nlists[index].n_un.n_strx; std::string result; while (true) { char c = 0; if (!m_reader.ReadMemory((void*)symbolNameAddress, &c, sizeof(char))) { m_reader.Trace("ERROR: Failed to read string table at %p\n", (void*)symbolNameAddress); break; } if (c == '\0') { break; } result.append(1, c); symbolNameAddress++; } return result; } //-------------------------------------------------------------------- // MachO reader //-------------------------------------------------------------------- MachOReader::MachOReader() { } bool MachOReader::EnumerateModules(mach_vm_address_t address, mach_header_64* header) { _ASSERTE(header->magic == MH_MAGIC_64); _ASSERTE(header->filetype == MH_DYLINKER); MachOModule dylinker(*this, address, header); // Search for symbol for the dyld image info cache uint64_t dyldInfoAddress = 0; if (!dylinker.TryLookupSymbol("dyld_all_image_infos", &dyldInfoAddress)) { Trace("ERROR: Can not find the _dyld_all_image_infos symbol\n"); return false; } // Read the all image info from the dylinker image dyld_all_image_infos dyldInfo; if (!ReadMemory((void*)dyldInfoAddress, &dyldInfo, sizeof(dyld_all_image_infos))) { Trace("ERROR: Failed to read dyld_all_image_infos at %p\n", (void*)dyldInfoAddress); return false; } std::string dylinkerPath; if 
(!ReadString(dyldInfo.dyldPath, dylinkerPath)) { Trace("ERROR: Failed to read name at %p\n", dyldInfo.dyldPath); return false; } dylinker.SetName(dylinkerPath); Trace("MOD: %016llx %08x %s\n", dylinker.BaseAddress(), dylinker.Header().flags, dylinker.Name().c_str()); VisitModule(dylinker); void* imageInfosAddress = (void*)dyldInfo.infoArray; size_t imageInfosSize = dyldInfo.infoArrayCount * sizeof(dyld_image_info); Trace("MOD: infoArray %p infoArrayCount %d\n", dyldInfo.infoArray, dyldInfo.infoArrayCount); ArrayHolder<dyld_image_info> imageInfos = new (std::nothrow) dyld_image_info[dyldInfo.infoArrayCount]; if (imageInfos == nullptr) { Trace("ERROR: Failed to allocate %zu byte image infos\n", imageInfosSize); return false; } if (!ReadMemory(imageInfosAddress, imageInfos, imageInfosSize)) { Trace("ERROR: Failed to read dyld_all_image_infos at %p\n", imageInfosAddress); return false; } for (int i = 0; i < dyldInfo.infoArrayCount; i++) { mach_vm_address_t imageAddress = (mach_vm_address_t)imageInfos[i].imageLoadAddress; const char* imageFilePathAddress = imageInfos[i].imageFilePath; std::string imagePath; if (!ReadString(imageFilePathAddress, imagePath)) { Trace("ERROR: Failed to read image name at %p\n", imageFilePathAddress); continue; } MachOModule module(*this, imageAddress, nullptr, &imagePath); if (!module.ReadHeader()) { continue; } Trace("MOD: %016llx %08x %s\n", imageAddress, module.Header().flags, imagePath.c_str()); VisitModule(module); } return true; } bool MachOReader::ReadString(const char* address, std::string& str) { for (int i = 0; i < MAX_LONGPATH; i++) { char c = 0; if (!ReadMemory((void*)(address + i), &c, sizeof(char))) { Trace("ERROR: Failed to read string at %p\n", (void*)(address + i)); return false; } if (c == '\0') { break; } str.append(1, c); } return true; }
-1
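TryGetSymbol in the record above is the exported entry point of the Mach-O reader: given an ICorDebugDataTarget and the in-memory base address of a module, it reads the module's load commands and symbol table from the target and resolves an export to an absolute address (load bias plus the symbol's vmaddr). A minimal usage sketch — the data-target variable, module base, and symbol name below are placeholders, not values taken from the runtime:

// pDataTarget : ICorDebugDataTarget* for the process being inspected (assumed available)
// moduleBase  : load address of the Mach-O module of interest (assumed already known)
uint64_t symbolAddress = 0;
if (TryGetSymbol(pDataTarget, moduleBase, "SomeExportedSymbol", &symbolAddress))
{
    // symbolAddress now holds the absolute address of the export in the target
    // process.  Note the reader strips the Mach-O leading underscore, so an
    // "_SomeExportedSymbol" entry in the symtab matches the name passed here.
}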
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/mono/mono/arch/amd64/amd64-codegen.h
/* * amd64-codegen.h: Macros for generating amd64 code * * Authors: * Paolo Molaro ([email protected]) * Intel Corporation (ORP Project) * Sergey Chaban ([email protected]) * Dietmar Maurer ([email protected]) * Patrik Torstensson * Zalman Stern * * Copyright (C) 2000 Intel Corporation. All rights reserved. * Copyright (C) 2001, 2002 Ximian, Inc. * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef AMD64_H #define AMD64_H // Conventions in this file: // body: implementation. other macros call this one // disp: displacement // inst: instruction // is_half: short if true, byte if false (then why is it named is_half...?) // imm: immediate // mem: read from (immediate-supplied address?) // membase: read from address in a base register plus a displacement // memindex: SIP addressing: (address in base register) + (displacement in index register)<<(shift) // reg: register, encode modR/M bits 00 // regp: register, encode modR/M bits 11 // size: Expected 1,2,4 or 8 // widen: extends from 1 or 2 bytes #include <glib.h> typedef enum { AMD64_RAX = 0, AMD64_RCX = 1, AMD64_RDX = 2, AMD64_RBX = 3, AMD64_RSP = 4, AMD64_RBP = 5, AMD64_RSI = 6, AMD64_RDI = 7, AMD64_R8 = 8, AMD64_R9 = 9, AMD64_R10 = 10, AMD64_R11 = 11, AMD64_R12 = 12, AMD64_R13 = 13, AMD64_R14 = 14, AMD64_R15 = 15, AMD64_RIP = 16, AMD64_NREG } AMD64_Reg_No; typedef enum { AMD64_XMM0 = 0, AMD64_XMM1 = 1, AMD64_XMM2 = 2, AMD64_XMM3 = 3, AMD64_XMM4 = 4, AMD64_XMM5 = 5, AMD64_XMM6 = 6, AMD64_XMM7 = 7, AMD64_XMM8 = 8, AMD64_XMM9 = 9, AMD64_XMM10 = 10, AMD64_XMM11 = 11, AMD64_XMM12 = 12, AMD64_XMM13 = 13, AMD64_XMM14 = 14, AMD64_XMM15 = 15, AMD64_XMM_NREG = 16, } AMD64_XMM_Reg_No; typedef enum { AMD64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */ AMD64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */ AMD64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */ AMD64_REX_W = 8 /* Opeartion is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */ } AMD64_REX_Bits; #define amd64_codegen_pre(inst) #define amd64_codegen_post(inst) #ifdef TARGET_WIN32 #define AMD64_ARG_REG1 AMD64_RCX #define AMD64_ARG_REG2 AMD64_RDX #define AMD64_ARG_REG3 AMD64_R8 #define AMD64_ARG_REG4 AMD64_R9 #else #define AMD64_ARG_REG1 AMD64_RDI #define AMD64_ARG_REG2 AMD64_RSI #define AMD64_ARG_REG3 AMD64_RDX #define AMD64_ARG_REG4 AMD64_RCX #endif #ifdef TARGET_WIN32 #define AMD64_CALLEE_REGS ((1<<AMD64_RAX) | (1<<AMD64_RCX) | (1<<AMD64_RDX) | (1<<AMD64_R8) | (1<<AMD64_R9) | (1<<AMD64_R10)) #define AMD64_IS_CALLEE_REG(reg) (AMD64_CALLEE_REGS & (1 << (reg))) #define AMD64_ARGUMENT_REGS ((1<<AMD64_RDX) | (1<<AMD64_RCX) | (1<<AMD64_R8) | (1<<AMD64_R9)) #define AMD64_IS_ARGUMENT_REG(reg) (AMD64_ARGUMENT_REGS & (1 << (reg))) /* xmm0-xmm3 for standard calling convention, additionally xmm4-xmm5 for __vectorcall (not currently used) */ #define AMD64_ARGUMENT_XREGS ((1<<AMD64_XMM0) | (1<<AMD64_XMM1) | (1<<AMD64_XMM2) | (1<<AMD64_XMM3) | (1<<AMD64_XMM4) | (1<<AMD64_XMM5)) #define AMD64_IS_ARGUMENT_XREG(reg) (AMD64_ARGUMENT_XREGS & (1 << (reg))) #define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RDI) | (1<<AMD64_RSI) | (1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_R15) | (1<<AMD64_RBP)) #define AMD64_IS_CALLEE_SAVED_REG(reg) (AMD64_CALLEE_SAVED_REGS & (1 << (reg))) #else #define AMD64_CALLEE_REGS ((1<<AMD64_RAX) | (1<<AMD64_RCX) | (1<<AMD64_RDX) | (1<<AMD64_RSI) | (1<<AMD64_RDI) | (1<<AMD64_R8) | 
(1<<AMD64_R9) | (1<<AMD64_R10)) #define AMD64_IS_CALLEE_REG(reg) (AMD64_CALLEE_REGS & (1 << (reg))) #define AMD64_ARGUMENT_REGS ((1<<AMD64_RDI) | (1<<AMD64_RSI) | (1<<AMD64_RDX) | (1<<AMD64_RCX) | (1<<AMD64_R8) | (1<<AMD64_R9)) #define AMD64_IS_ARGUMENT_REG(reg) (AMD64_ARGUMENT_REGS & (1 << (reg))) #define AMD64_ARGUMENT_XREGS ((1<<AMD64_XMM0) | (1<<AMD64_XMM1) | (1<<AMD64_XMM2) | (1<<AMD64_XMM3) | (1<<AMD64_XMM4) | (1<<AMD64_XMM5) | (1<<AMD64_XMM6) | (1<<AMD64_XMM7)) #define AMD64_IS_ARGUMENT_XREG(reg) (AMD64_ARGUMENT_XREGS & (1 << (reg))) #define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_R15) | (1<<AMD64_RBP)) #define AMD64_IS_CALLEE_SAVED_REG(reg) (AMD64_CALLEE_SAVED_REGS & (1 << (reg))) #endif #define AMD64_REX(bits) ((unsigned char)(0x40 | (bits))) #define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \ { \ unsigned char _amd64_rex_bits = \ (((width) > 4) ? AMD64_REX_W : 0) | \ (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \ (((reg_index) > 7) ? AMD64_REX_X : 0) | \ (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \ if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ } while (0) typedef union { guint64 val; unsigned char b [8]; } amd64_imm_buf; #include "../x86/x86-codegen.h" /* In 64 bit mode, all registers have a low byte subregister */ #undef X86_IS_BYTE_REG #define X86_IS_BYTE_REG(reg) 1 #define amd64_modrm_mod(modrm) ((modrm) >> 6) #define amd64_modrm_reg(modrm) (((modrm) >> 3) & 0x7) #define amd64_modrm_rm(modrm) ((modrm) & 0x7) #define amd64_rex_r(rex) ((((rex) >> 2) & 0x1) << 3) #define amd64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3) #define amd64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3) #define amd64_sib_scale(sib) ((sib) >> 6) #define amd64_sib_index(sib) (((sib) >> 3) & 0x7) #define amd64_sib_base(sib) ((sib) & 0x7) #define amd64_is_imm32(val) ((gint64)val >= -((gint64)1<<31) && (gint64)val <= (((gint64)1<<31)-1)) #define x86_imm_emit64(inst,imm) \ do { \ amd64_imm_buf imb; \ imb.val = (guint64) (imm); \ *(inst)++ = imb.b [0]; \ *(inst)++ = imb.b [1]; \ *(inst)++ = imb.b [2]; \ *(inst)++ = imb.b [3]; \ *(inst)++ = imb.b [4]; \ *(inst)++ = imb.b [5]; \ *(inst)++ = imb.b [6]; \ *(inst)++ = imb.b [7]; \ } while (0) #define amd64_membase_emit(inst,reg,basereg,disp) do { \ if ((basereg) == AMD64_RIP) { \ x86_address_byte ((inst), 0, (reg)&0x7, 5); \ x86_imm_emit32 ((inst), (disp)); \ } \ else \ x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \ } while (0) #define amd64_alu_reg_imm_size_body(inst,opc,reg,imm,size) \ do { \ if (x86_is_imm8((imm))) { \ amd64_emit_rex(inst, size, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x83; \ x86_reg_emit ((inst), (opc), (reg)); \ x86_imm_emit8 ((inst), (imm)); \ } else if ((reg) == AMD64_RAX) { \ amd64_emit_rex(inst, size, 0, 0, 0); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \ x86_imm_emit32 ((inst), (imm)); \ } else { \ amd64_emit_rex(inst, size, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x81; \ x86_reg_emit ((inst), (opc), (reg)); \ x86_imm_emit32 ((inst), (imm)); \ } \ } while (0) #define amd64_alu_reg_reg_size_body(inst,opc,dreg,reg,size) \ do { \ amd64_emit_rex(inst, size, (dreg), 0, (reg)); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ x86_reg_emit ((inst), (dreg), (reg)); \ } while (0) #define amd64_test_reg_imm_size_body(inst,reg,imm,size) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst),(size),0,0,(reg)); \ if ((reg) == AMD64_RAX) { \ *(inst)++ = (unsigned char)0xa9; \ } \ else { \ 
*(inst)++ = (unsigned char)0xf7; \ x86_reg_emit((inst), 0, (reg)); \ } \ x86_imm_emit32((inst), (imm)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \ amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)) #define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)) #define amd64_test_reg_imm_size(inst, reg, imm, size) \ amd64_test_reg_imm_size_body(inst, reg, imm, size) #define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size((inst),(opc),(reg),(imm),8) #define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8) #define amd64_test_reg_imm(inst,reg,imm) amd64_test_reg_imm_size(inst,reg,imm,8) #define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst),(size),(reg),0,(basereg)); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ amd64_membase_emit (inst, reg, basereg, disp); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_regp_reg(inst,regp,reg,size) \ do { \ amd64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, (regp)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ x86_regp_emit ((inst), (reg), (regp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_membase_reg(inst,basereg,disp,reg,size) \ do { \ amd64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_mem_reg(inst,mem,reg,size) \ do { \ amd64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, 0); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ x86_address_byte ((inst), 0, (reg), 4); \ x86_address_byte ((inst), 0, 4, 5); \ x86_imm_emit32 ((inst), (mem)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_reg_reg(inst,dreg,reg,size) \ do { \ amd64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (dreg), 0, (reg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ x86_reg_emit ((inst), (dreg), (reg)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_reg_mem_body(inst,reg,mem,size) \ do { \ amd64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, 0); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ x86_address_byte ((inst), 0, (reg), 4); \ x86_address_byte ((inst), 0, 4, 5); \ x86_imm_emit32 ((inst), (mem)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_reg_mem(inst,reg,mem,size) \ do { \ amd64_mov_reg_mem_body((inst),(reg),(mem),(size)); \ } while (0) 
#define amd64_mov_reg_membase_body(inst,reg,basereg,disp,size) \ do { \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) #define amd64_mov_reg_memindex_size_body(inst,reg,basereg,disp,indexreg,shift,size) \ do { \ amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); \ x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); \ } while (0) #define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ amd64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size)) #define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ do { \ amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ } while (0) #define amd64_movzx_reg_membase(inst,reg,basereg,disp,size) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb6; break; \ case 2: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb7; break; \ case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movsxd_reg_mem(inst,reg,mem) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst,8,(reg),0,0); \ *(inst)++ = (unsigned char)0x63; \ x86_mem_emit ((inst), ((reg)&0x7), (mem)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movsxd_reg_membase(inst,reg,basereg,disp) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst,8,(reg),0,(basereg)); \ *(inst)++ = (unsigned char)0x63; \ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movsxd_reg_reg(inst,dreg,reg) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst,8,(dreg),0,(reg)); \ *(inst)++ = (unsigned char)0x63; \ x86_reg_emit ((inst), (dreg), (reg)); \ amd64_codegen_post(inst); \ } while (0) /* Pretty much the only instruction that supports a 64-bit immediate. Optimize for common case of * 32-bit immediate. Pepper with casts to avoid warnings. */ #define amd64_mov_reg_imm_size(inst,reg,imm,size) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst, (size), 0, 0, (reg)); \ *(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \ if ((size) == 8) \ x86_imm_emit64 ((inst), (guint64)(imm)); \ else \ x86_imm_emit32 ((inst), (int)(guint64)(imm)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_reg_imm(inst,reg,imm) \ do { \ amd64_codegen_pre(inst); \ amd64_mov_reg_imm_size ((inst), (reg), (imm), (amd64_is_imm32 (((gint64)imm)) ? 4 : 8)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_set_reg_template(inst,reg) amd64_mov_reg_imm_size ((inst),(reg), 0, 8) #define amd64_set_template(inst,reg) amd64_set_reg_template((inst),(reg)) #define amd64_mov_membase_imm(inst,basereg,disp,imm,size) \ do { \ amd64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size) == 1 ? 
0 : (size), 0, 0, (basereg)); \ if ((size) == 1) { \ *(inst)++ = (unsigned char)0xc6; \ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ x86_imm_emit8 ((inst), (imm)); \ } else if ((size) == 2) { \ *(inst)++ = (unsigned char)0xc7; \ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ x86_imm_emit16 ((inst), (imm)); \ } else { \ *(inst)++ = (unsigned char)0xc7; \ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ x86_imm_emit32 ((inst), (imm)); \ } \ amd64_codegen_post(inst); \ } while (0) #define amd64_lea_membase_body(inst,reg,basereg,disp,width) \ do { \ amd64_emit_rex(inst, width, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x8d; \ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) #define amd64_lea4_membase(inst,reg,basereg,disp) \ amd64_lea_membase_body((inst), (reg), (basereg), (disp), 4) #define amd64_lea_membase(inst,reg,basereg,disp) \ amd64_lea_membase_body((inst), (reg), (basereg), (disp), 8) /* Instruction are implicitly 64-bits so don't generate REX for just the size. */ #define amd64_push_reg(inst,reg) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst, 0, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x50 + ((reg) & 0x7); \ amd64_codegen_post(inst); \ } while (0) /* Instruction is implicitly 64-bits so don't generate REX for just the size. */ #define amd64_push_membase(inst,basereg,disp) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst, 0, 0, 0, (basereg)); \ *(inst)++ = (unsigned char)0xff; \ x86_membase_emit ((inst), 6, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_pop_reg_body(inst,reg) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst, 0, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x58 + ((reg) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_call_reg(inst,reg) \ do { \ amd64_emit_rex(inst, 0, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0xff; \ x86_reg_emit ((inst), 2, ((reg) & 0x7)); \ } while (0) #define amd64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0) #define amd64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0) #define amd64_pop_reg(inst,reg) amd64_pop_reg_body((inst), (reg)) #define amd64_movsd_reg_regp(inst,reg,regp) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf2); \ amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x10; \ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movsd_regp_reg(inst,regp,reg) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf2); \ amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x11; \ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movss_reg_regp(inst,reg,regp) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf3); \ amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x10; \ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movss_regp_reg(inst,regp,reg) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf3); \ amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x11; \ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movdqu_reg_membase(inst,reg,basereg,disp) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 
0xf3); \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x6f; \ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movsd_reg_membase(inst,reg,basereg,disp) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf2); \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x10; \ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movss_reg_membase(inst,reg,basereg,disp) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf3); \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x10; \ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movdqu_membase_reg(inst,basereg,disp,reg) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf3); \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x7f; \ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movsd_membase_reg(inst,basereg,disp,reg) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf2); \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x11; \ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movss_membase_reg(inst,basereg,disp,reg) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf3); \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x11; \ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) /* The original inc_reg opcode is used as the REX prefix */ #define amd64_inc_reg_size(inst,reg,size) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst),(size),0,0,(reg)); \ *(inst)++ = (unsigned char)0xff; \ x86_reg_emit ((inst),0,(reg) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_dec_reg_size(inst,reg,size) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst),(size),0,0,(reg)); \ *(inst)++ = (unsigned char)0xff; \ x86_reg_emit ((inst),1,(reg) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst),0,0,0,(basereg)); \ *(inst)++ = (is_double) ? 
(unsigned char)0xdd : (unsigned char)0xd9; \ amd64_membase_emit ((inst), 0, (basereg), (disp)); \ amd64_codegen_post(inst); \ } while (0) /* From the AMD64 Software Optimization Manual */ #define amd64_padding_size(inst,size) \ do { \ switch ((size)) { \ case 1: *(inst)++ = 0x90; break; \ case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ case 3: *(inst)++ = 0x66; *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ default: amd64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \ }; \ } while (0) #define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst),2, (basereg),(disp)); } while (0) #define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst), 4, (basereg), (disp)); } while (0) #define amd64_jump_code_size(inst,target,size) do { \ if (amd64_is_imm32 ((gint64)(target) - (gint64)(inst))) { \ x86_jump_code((inst),(target)); \ } else { \ amd64_jump_membase ((inst), AMD64_RIP, 0); \ *(guint64*)(inst) = (guint64)(target); \ (inst) += 8; \ } \ } while (0) /* * SSE */ //TODO Reorganize SSE opcode defines. /* Two opcode SSE defines */ #define emit_sse_reg_reg_op2_size(inst,dreg,reg,op1,op2,size) do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ *(inst)++ = (unsigned char)(op1); \ *(inst)++ = (unsigned char)(op2); \ x86_reg_emit ((inst), (dreg), (reg)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_reg_reg_op2(inst,dreg,reg,op1,op2) emit_sse_reg_reg_op2_size ((inst), (dreg), (reg), (op1), (op2), 0) #define emit_sse_reg_reg_op2_imm(inst,dreg,reg,op1,op2,imm) do { \ amd64_codegen_pre(inst); \ emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \ x86_imm_emit8 ((inst), (imm)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_membase_reg_op2(inst,basereg,disp,reg,op1,op2) do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)(op1); \ *(inst)++ = (unsigned char)(op2); \ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_reg_membase_op2(inst,dreg,basereg,disp,op1,op2) do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 
0 : (basereg)); \ *(inst)++ = (unsigned char)(op1); \ *(inst)++ = (unsigned char)(op2); \ amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ amd64_codegen_post(inst); \ } while (0) /* Three opcode SSE defines */ #define emit_opcode3(inst,op1,op2,op3) do { \ *(inst)++ = (unsigned char)(op1); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ } while (0) #define emit_sse_reg_reg_size(inst,dreg,reg,op1,op2,op3,size) do { \ amd64_codegen_pre(inst); \ *(inst)++ = (unsigned char)(op1); \ amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ x86_reg_emit ((inst), (dreg), (reg)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0) #define emit_sse_reg_reg_imm(inst,dreg,reg,op1,op2,op3,imm) do { \ amd64_codegen_pre(inst); \ emit_sse_reg_reg ((inst), (dreg), (reg), (op1), (op2), (op3)); \ x86_imm_emit8 ((inst), (imm)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), (unsigned char)(op1)); \ amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_reg_membase(inst,dreg,basereg,disp,op1,op2,op3) do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), (unsigned char)(op1)); \ amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ amd64_codegen_post(inst); \ } while (0) /* Four opcode SSE defines */ #define emit_sse_reg_reg_op4_size(inst,dreg,reg,op1,op2,op3,op4,size) do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), (unsigned char)(op1)); \ amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ *(inst)++ = (unsigned char)(op4); \ x86_reg_emit ((inst), (dreg), (reg)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_reg_reg_op4(inst,dreg,reg,op1,op2,op3,op4) emit_sse_reg_reg_op4_size ((inst), (dreg), (reg), (op1), (op2), (op3), (op4), 0) #define emit_sse_reg_reg_op4_imm(inst,dreg,reg,op1,op2,op3,op4,imm) do { \ amd64_codegen_pre(inst); \ emit_sse_reg_reg_op4 ((inst), (dreg), (reg), (op1), (op2), (op3), (op4)); \ x86_imm_emit8 ((inst), (imm)); \ amd64_codegen_post(inst); \ } while (0) /* specific SSE opcode defines */ #define amd64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57) #define amd64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57) #define amd64_sse_andpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x54) #define amd64_sse_movsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x10) #define amd64_sse_movss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x10) #define amd64_sse_movsd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf2, 0x0f, 0x10) #define amd64_sse_movsd_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf2, 0x0f, 0x11) #define 
amd64_sse_movss_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf3, 0x0f, 0x11) #define amd64_sse_movss_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf3, 0x0f, 0x10) #define amd64_sse_comisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2f) #define amd64_sse_comiss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x67,0x0f,0x2f) #define amd64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f) #define amd64_sse_ucomisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2e) #define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8) #define amd64_sse_cvtss2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2d, 8) #define amd64_sse_cvttsd2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, (size)) #define amd64_sse_cvtss2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2c, (size)) #define amd64_sse_cvttsd2si_reg_reg(inst,dreg,reg) amd64_sse_cvttsd2si_reg_reg_size ((inst), (dreg), (reg), 8) #define amd64_sse_cvtsi2sd_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, (size)) #define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2sd_reg_reg_size ((inst), (dreg), (reg), 8) #define amd64_sse_cvtsi2ss_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2a, (size)) #define amd64_sse_cvtsi2ss_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2ss_reg_reg_size ((inst), (dreg), (reg), 8) #define amd64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a) #define amd64_sse_cvtss2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5a) #define amd64_sse_addsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x58) #define amd64_sse_addss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x58) #define amd64_sse_subsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5c) #define amd64_sse_subss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5c) #define amd64_sse_mulsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x59) #define amd64_sse_mulss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x59) #define amd64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e) #define amd64_sse_divss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5e) #define amd64_sse_sqrtsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x51) #define amd64_sse_pinsrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc4, (imm)) #define amd64_sse_pextrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc5, (imm)) #define amd64_sse_cvttsd2si_reg_xreg_size(inst,reg,xreg,size) emit_sse_reg_reg_size ((inst), (reg), (xreg), 0xf2, 0x0f, 0x2c, (size)) #define amd64_sse_addps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58) #define amd64_sse_divps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 
0x5e) #define amd64_sse_mulps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59) #define amd64_sse_subps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c) #define amd64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f) #define amd64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d) #define amd64_sse_cmpps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc2, (imm)) #define amd64_sse_andps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x54) #define amd64_sse_andnps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x55) #define amd64_sse_orps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x56) #define amd64_sse_xorps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x57) #define amd64_sse_sqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x51) #define amd64_sse_rsqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x52) #define amd64_sse_rcpps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x53) #define amd64_sse_addsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0xd0) #define amd64_sse_haddps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7c) #define amd64_sse_hsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7d) #define amd64_sse_movshdup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x16) #define amd64_sse_movsldup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x12) #define amd64_sse_pshufhw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf3, 0x0f, 0x70, (imm)) #define amd64_sse_pshuflw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf2, 0x0f, 0x70, (imm)) #define amd64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm)) #define amd64_sse_shufps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xC6, (imm)) #define amd64_sse_shufpd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xC6, (imm)) #define amd64_sse_roundpd_reg_reg_imm(inst, dreg, reg, imm) emit_sse_reg_reg_op4_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x3a, 0x09, (imm)) #define amd64_sse_addpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x58) #define amd64_sse_divpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5e) #define amd64_sse_mulpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x59) #define amd64_sse_subpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5c) #define amd64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f) #define amd64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d) #define amd64_sse_cmppd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc2, (imm)) #define amd64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x54) #define amd64_sse_andnpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x55) #define 
amd64_sse_orpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x56) #define amd64_sse_sqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x51) #define amd64_sse_rsqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x52) #define amd64_sse_rcppd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x53) #define amd64_sse_addsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd0) #define amd64_sse_haddpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7c) #define amd64_sse_hsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7d) #define amd64_sse_movddup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x12) #define amd64_sse_pmovmskb_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd7) #define amd64_sse_pand_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdb) #define amd64_sse_pandn_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdf) #define amd64_sse_por_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xeb) #define amd64_sse_pxor_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xef) #define amd64_sse_paddb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfc) #define amd64_sse_paddw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfd) #define amd64_sse_paddd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfe) #define amd64_sse_paddq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd4) #define amd64_sse_psubb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf8) #define amd64_sse_psubw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf9) #define amd64_sse_psubd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfa) #define amd64_sse_psubq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfb) #define amd64_sse_pmaxub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xde) #define amd64_sse_pmaxuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3e) #define amd64_sse_pmaxud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3f) #define amd64_sse_pmaxsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3c) #define amd64_sse_pmaxsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xee) #define amd64_sse_pmaxsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3d) #define amd64_sse_pavgb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe0) #define amd64_sse_pavgw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3) #define amd64_sse_pminub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xda) #define amd64_sse_pminuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3a) #define amd64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3b) #define amd64_sse_pminsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), 
(dreg), (reg), 0x66, 0x0f, 0x38, 0x38) #define amd64_sse_pminsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xea) #define amd64_sse_pminsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x39) #define amd64_sse_pcmpeqb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x74) #define amd64_sse_pcmpeqw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x75) #define amd64_sse_pcmpeqd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x76) #define amd64_sse_pcmpeqq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x29) #define amd64_sse_pcmpgtb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x64) #define amd64_sse_pcmpgtw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x65) #define amd64_sse_pcmpgtd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x66) #define amd64_sse_pcmpgtq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x37) #define amd64_sse_psadbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf6) #define amd64_sse_punpcklbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x60) #define amd64_sse_punpcklwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x61) #define amd64_sse_punpckldq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x62) #define amd64_sse_punpcklqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6c) #define amd64_sse_unpcklpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x14) #define amd64_sse_unpcklps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x14) #define amd64_sse_punpckhbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x68) #define amd64_sse_punpckhwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x69) #define amd64_sse_punpckhdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6a) #define amd64_sse_punpckhqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6d) #define amd64_sse_unpckhpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x15) #define amd64_sse_unpckhps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x15) #define amd64_sse_packsswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x63) #define amd64_sse_packssdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6b) #define amd64_sse_packuswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x67) #define amd64_sse_packusdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x2b) #define amd64_sse_paddusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdc) #define amd64_sse_psubusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8) #define amd64_sse_paddusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdd) #define amd64_sse_psubusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8) #define amd64_sse_paddsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), 
(dreg), (reg), 0x66, 0x0f, 0xec) #define amd64_sse_psubsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe8) #define amd64_sse_paddsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xed) #define amd64_sse_psubsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe9) #define amd64_sse_pmullw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd5) #define amd64_sse_pmulld_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x40) #define amd64_sse_pmuludq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf4) #define amd64_sse_pmulhuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe4) #define amd64_sse_pmulhw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe5) #define amd64_sse_psrlw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x71, (imm)) #define amd64_sse_psrlw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd1) #define amd64_sse_psraw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x71, (imm)) #define amd64_sse_psraw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe1) #define amd64_sse_psllw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x71, (imm)) #define amd64_sse_psllw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf1) #define amd64_sse_psrld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x72, (imm)) #define amd64_sse_psrld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd2) #define amd64_sse_psrad_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x72, (imm)) #define amd64_sse_psrad_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe2) #define amd64_sse_pslld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x72, (imm)) #define amd64_sse_pslld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf2) #define amd64_sse_psrlq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x73, (imm)) #define amd64_sse_psrlq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd3) #define amd64_sse_psraq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x73, (imm)) #define amd64_sse_psraq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3) #define amd64_sse_psllq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x73, (imm)) #define amd64_sse_psllq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf3) #define amd64_sse_cvtdq2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0xE6) #define amd64_sse_cvtdq2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5B) #define amd64_sse_cvtpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF2, 0x0F, 0xE6) #define amd64_sse_cvtpd2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5A) #define amd64_sse_cvtps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5B) #define 
amd64_sse_cvtps2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5A) #define amd64_sse_cvttpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0xE6) #define amd64_sse_cvttps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0x5B) #define amd64_movd_xreg_reg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (dreg), (sreg), 0x66, 0x0f, 0x6e, (size)) #define amd64_movd_reg_xreg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (sreg), (dreg), 0x66, 0x0f, 0x7e, (size)) #define amd64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e) #define amd64_movlhps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16) #define amd64_movhlps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12) #define amd64_sse_movups_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11) #define amd64_sse_movups_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10) #define amd64_sse_movaps_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29) #define amd64_sse_movaps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28) #define amd64_sse_movaps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28) #define amd64_sse_movntps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x2b) #define amd64_sse_prefetch_reg_membase(inst, arg, basereg, disp) emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18) #define amd64_sse_lzcnt_reg_reg_size(inst, dreg, reg, size) emit_sse_reg_reg_size((inst), (dreg), (reg), 0xf3, 0x0f, 0xbd, (size)) #define amd64_sse_popcnt_reg_reg_size(inst, dreg, reg, size) emit_sse_reg_reg_size((inst), (dreg), (reg), 0xf3, 0x0f, 0xb8, (size)) /* Generated from x86-codegen.h */ #define amd64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0) #define amd64_cld_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_cld(inst); amd64_codegen_post(inst); } while (0) #define amd64_stosb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); amd64_codegen_post(inst); } while (0) #define amd64_stosl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); amd64_codegen_post(inst); } while (0) #define amd64_stosd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); amd64_codegen_post(inst); } while (0) #define amd64_movsb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); amd64_codegen_post(inst); } while (0) #define amd64_movsl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); amd64_codegen_post(inst); } while (0) #define amd64_movsd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); amd64_codegen_post(inst); } while (0) #define amd64_prefix_size(inst,p,size) do { x86_prefix((inst), p); } while (0) #define amd64_rdtsc_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_rdtsc(inst); amd64_codegen_post(inst); } while (0) 
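/*
 * Minimal usage sketch for the scalar SSE emitters above.  Each macro writes
 * its mandatory prefix byte, a REX prefix when one is needed, the 0x0f-escaped
 * opcode and the ModRM/displacement bytes, then advances the cursor past what
 * it emitted.  The register names (AMD64_RAX, AMD64_RDI, AMD64_XMM0,
 * AMD64_XMM1) are assumed to be the register enums defined earlier in this
 * header, and `buf` is a caller-provided, writable code buffer.
 *
 *   unsigned char *code = buf;
 *   amd64_sse_movsd_reg_membase (code, AMD64_XMM0, AMD64_RDI, 8);    // movsd xmm0, [rdi+8]
 *   amd64_sse_addsd_reg_reg (code, AMD64_XMM0, AMD64_XMM1);          // addsd xmm0, xmm1
 *   amd64_sse_movsd_membase_reg (code, AMD64_RDI, 16, AMD64_XMM0);   // movsd [rdi+16], xmm0
 *   amd64_sse_cvttsd2si_reg_reg (code, AMD64_RAX, AMD64_XMM0);       // cvttsd2si rax, xmm0
 *
 * Because every emitter advances `code`, a single cursor can be threaded
 * through an arbitrary sequence of these macros.
 */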
#define amd64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_xchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_xchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_inc_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_inc_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) //#define amd64_inc_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_dec_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_dec_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) //#define amd64_dec_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_not_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_not_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_not_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_neg_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_neg_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } 
while (0) #define amd64_neg_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_nop_size(inst,size) do { amd64_codegen_pre(inst); x86_nop(inst); amd64_codegen_post(inst); } while (0) //#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_alu_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_alu_mem_reg_size(inst,opc,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0) //#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); amd64_codegen_post(inst); } while (0) #define amd64_alu_reg_mem_size(inst,opc,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) //#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) //#define amd64_test_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_test_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_test_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_test_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define 
amd64_test_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
#define amd64_test_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
#define amd64_shift_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
#define amd64_shift_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0)
#define amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
#define amd64_shift_reg_size(inst,opc,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
#define amd64_shift_mem_size(inst,opc,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem((inst),(opc),(mem)); amd64_codegen_post(inst); } while (0)
#define amd64_shift_membase_size(inst,opc,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
#define amd64_shrd_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
#define amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
#define amd64_shld_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
#define amd64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
#define amd64_mul_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
#define amd64_mul_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
#define amd64_mul_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
#define amd64_imul_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
#define amd64_imul_reg_mem_size(inst,reg,mem,size) do { 
amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_div_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_div_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_div_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_mov_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_regp_reg_size(inst,regp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 
4 : (size)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_clear_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_mov_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_lea_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) //#define amd64_lea_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0) #define amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) #define amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) #define amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) #define amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) #define amd64_cdq_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); amd64_codegen_post(inst); } while (0) #define 
amd64_wait_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); amd64_codegen_post(inst); } while (0) #define amd64_fp_op_mem_size(inst,opc,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); amd64_codegen_post(inst); } while (0) #define amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0) #define amd64_fp_op_size(inst,opc,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); amd64_codegen_post(inst); } while (0) #define amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0) #define amd64_fstp_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fcompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); amd64_codegen_post(inst); } while (0) #define amd64_fucompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); amd64_codegen_post(inst); } while (0) #define amd64_fnstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); amd64_codegen_post(inst); } while (0) #define amd64_fnstcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_fnstcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_fldcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_fldcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_fchs_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); amd64_codegen_post(inst); } while (0) #define amd64_frem_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_frem(inst); amd64_codegen_post(inst); } while (0) #define amd64_fxch_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fcomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fcomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fucomi_size(inst,index,size) do { amd64_codegen_pre(inst); 
amd64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fucomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fld_size(inst,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); amd64_codegen_post(inst); } while (0) //#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0) #define amd64_fld80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_fld80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_fild_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0) #define amd64_fild_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0) #define amd64_fld_reg_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fldz_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); amd64_codegen_post(inst); } while (0) #define amd64_fld1_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); amd64_codegen_post(inst); } while (0) #define amd64_fldpi_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); amd64_codegen_post(inst); } while (0) #define amd64_fst_size(inst,mem,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0) #define amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0) #define amd64_fst80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_fst80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_fist_pop_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0) #define amd64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0) #define amd64_fstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); 
amd64_codegen_post(inst); } while (0) #define amd64_fist_membase_size(inst,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0) //#define amd64_push_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_push_regp_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_push_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) //#define amd64_push_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0) #define amd64_push_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); amd64_codegen_post(inst); } while (0) //#define amd64_pop_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_pop_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_pop_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_pushad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); amd64_codegen_post(inst); } while (0) #define amd64_pushfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); amd64_codegen_post(inst); } while (0) #define amd64_popad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); amd64_codegen_post(inst); } while (0) #define amd64_popfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); amd64_codegen_post(inst); } while (0) #define amd64_loop_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_loope_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_loopne_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_jump32_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_jump8_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); amd64_codegen_post(inst); } 
while (0) /* Defined above for Native Client, so they can be used in other macros */ #define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0) #define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0) #define amd64_jump_disp_size(inst,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) #define amd64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) #define amd64_branch_size_body(inst,cond,target,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0) #define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { amd64_codegen_pre(inst); x86_set_mem((inst),(cond),(mem),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) //#define amd64_call_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_call_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0) #define amd64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0) //#define amd64_ret_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); amd64_codegen_post(inst); } while (0) #define amd64_ret_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); 
x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_enter_size(inst,framesize) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_enter((inst),(framesize)); amd64_codegen_post(inst); } while (0) //#define amd64_leave_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); amd64_codegen_post(inst); } while (0) #define amd64_sahf_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); amd64_codegen_post(inst); } while (0) #define amd64_fsin_size(inst,size) do { amd64_codegen_pre(inst); x86_fsin(inst); amd64_codegen_post(inst); } while (0) #define amd64_fcos_size(inst,size) do { amd64_codegen_pre(inst); x86_fcos(inst); amd64_codegen_post(inst); } while (0) #define amd64_fabs_size(inst,size) do { amd64_codegen_pre(inst); x86_fabs(inst); amd64_codegen_post(inst); } while (0) #define amd64_ftst_size(inst,size) do { amd64_codegen_pre(inst); x86_ftst(inst); amd64_codegen_post(inst); } while (0) #define amd64_fxam_size(inst,size) do { amd64_codegen_pre(inst); x86_fxam(inst); amd64_codegen_post(inst); } while (0) #define amd64_fpatan_size(inst,size) do { amd64_codegen_pre(inst); x86_fpatan(inst); amd64_codegen_post(inst); } while (0) #define amd64_fprem_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem(inst); amd64_codegen_post(inst); } while (0) #define amd64_fprem1_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem1(inst); amd64_codegen_post(inst); } while (0) #define amd64_frndint_size(inst,size) do { amd64_codegen_pre(inst); x86_frndint(inst); amd64_codegen_post(inst); } while (0) #define amd64_fsqrt_size(inst,size) do { amd64_codegen_pre(inst); x86_fsqrt(inst); amd64_codegen_post(inst); } while (0) #define amd64_fptan_size(inst,size) do { amd64_codegen_pre(inst); x86_fptan(inst); amd64_codegen_post(inst); } while (0) //#define amd64_padding_size(inst,size) do { amd64_codegen_pre(inst); x86_padding((inst),(size)); amd64_codegen_post(inst); } while (0) #define amd64_prolog_size(inst,frame_size,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); amd64_codegen_post(inst); } while (0) #define amd64_epilog_size(inst,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); amd64_codegen_post(inst); } while (0) #define amd64_xadd_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); amd64_codegen_post(inst); } while (0) #define amd64_xadd_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); amd64_codegen_post(inst); } while (0) #define amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); amd64_codegen_post(inst); } while (0) #define amd64_breakpoint(inst) amd64_breakpoint_size(inst,8) #define amd64_cld(inst) amd64_cld_size(inst,8) #define amd64_stosb(inst) amd64_stosb_size(inst,8) #define amd64_stosl(inst) amd64_stosl_size(inst,8) #define amd64_stosd(inst) amd64_stosd_size(inst,8) #define amd64_movsb(inst) amd64_movsb_size(inst,8) #define amd64_movsl(inst) amd64_movsl_size(inst,8) #define amd64_movsd(inst) 
amd64_movsd_size(inst,8)
#define amd64_prefix(inst,p) amd64_prefix_size(inst,p,8)
#define amd64_rdtsc(inst) amd64_rdtsc_size(inst,8)
#define amd64_cmpxchg_reg_reg(inst,dreg,reg) amd64_cmpxchg_reg_reg_size(inst,dreg,reg,8)
#define amd64_cmpxchg_mem_reg(inst,mem,reg) amd64_cmpxchg_mem_reg_size(inst,mem,reg,8)
#define amd64_cmpxchg_membase_reg(inst,basereg,disp,reg) amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,8)
#define amd64_xchg_reg_reg(inst,dreg,reg,size) amd64_xchg_reg_reg_size(inst,dreg,reg,size)
#define amd64_xchg_mem_reg(inst,mem,reg,size) amd64_xchg_mem_reg_size(inst,mem,reg,size)
#define amd64_xchg_membase_reg(inst,basereg,disp,reg,size) amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size)
#define amd64_xadd_reg_reg(inst,dreg,reg,size) amd64_xadd_reg_reg_size(inst,dreg,reg,size)
#define amd64_xadd_mem_reg(inst,mem,reg,size) amd64_xadd_mem_reg_size(inst,mem,reg,size)
#define amd64_xadd_membase_reg(inst,basereg,disp,reg,size) amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size)
#define amd64_inc_mem(inst,mem) amd64_inc_mem_size(inst,mem,8)
#define amd64_inc_membase(inst,basereg,disp) amd64_inc_membase_size(inst,basereg,disp,8)
#define amd64_inc_reg(inst,reg) amd64_inc_reg_size(inst,reg,8)
#define amd64_dec_mem(inst,mem) amd64_dec_mem_size(inst,mem,8)
#define amd64_dec_membase(inst,basereg,disp) amd64_dec_membase_size(inst,basereg,disp,8)
#define amd64_dec_reg(inst,reg) amd64_dec_reg_size(inst,reg,8)
#define amd64_not_mem(inst,mem) amd64_not_mem_size(inst,mem,8)
#define amd64_not_membase(inst,basereg,disp) amd64_not_membase_size(inst,basereg,disp,8)
#define amd64_not_reg(inst,reg) amd64_not_reg_size(inst,reg,8)
#define amd64_neg_mem(inst,mem) amd64_neg_mem_size(inst,mem,8)
#define amd64_neg_membase(inst,basereg,disp) amd64_neg_membase_size(inst,basereg,disp,8)
#define amd64_neg_reg(inst,reg) amd64_neg_reg_size(inst,reg,8)
#define amd64_nop(inst) amd64_nop_size(inst,8)
//#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size(inst,opc,reg,imm,8)
#define amd64_alu_mem_imm(inst,opc,mem,imm) amd64_alu_mem_imm_size(inst,opc,mem,imm,8)
#define amd64_alu_membase_imm(inst,opc,basereg,disp,imm) amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,8)
#define amd64_alu_mem_reg(inst,opc,mem,reg) amd64_alu_mem_reg_size(inst,opc,mem,reg,8)
#define amd64_alu_membase_reg(inst,opc,basereg,disp,reg) amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,8)
//#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size(inst,opc,dreg,reg,8)
#define amd64_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,8)
#define amd64_alu_reg_mem(inst,opc,reg,mem) amd64_alu_reg_mem_size(inst,opc,reg,mem,8)
#define amd64_alu_reg_membase(inst,opc,reg,basereg,disp) amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,8)
//#define amd64_test_reg_imm(inst,reg,imm) amd64_test_reg_imm_size(inst,reg,imm,8)
#define amd64_test_mem_imm(inst,mem,imm) amd64_test_mem_imm_size(inst,mem,imm,8)
#define amd64_test_membase_imm(inst,basereg,disp,imm) amd64_test_membase_imm_size(inst,basereg,disp,imm,8)
#define amd64_test_reg_reg(inst,dreg,reg) amd64_test_reg_reg_size(inst,dreg,reg,8)
#define amd64_test_mem_reg(inst,mem,reg) amd64_test_mem_reg_size(inst,mem,reg,8)
#define amd64_test_membase_reg(inst,basereg,disp,reg) amd64_test_membase_reg_size(inst,basereg,disp,reg,8)
#define amd64_shift_reg_imm(inst,opc,reg,imm) amd64_shift_reg_imm_size(inst,opc,reg,imm,8)
#define amd64_shift_mem_imm(inst,opc,mem,imm) amd64_shift_mem_imm_size(inst,opc,mem,imm,8)
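/*
 * Minimal sketch of how the fixed-size wrappers relate to the *_size forms:
 * most of the unsuffixed names above simply pin the operand size to 8 bytes
 * (a few, such as the xchg/xadd wrappers, still take an explicit size), while
 * the *_size variants let the caller select 1, 2, 4 or 8.  The X86_ADD ALU
 * opcode constant is assumed to come from x86-codegen.h, and AMD64_RAX /
 * AMD64_RDX from the register enum earlier in this header.
 *
 *   unsigned char *code = buf;
 *   amd64_alu_membase_imm (code, X86_ADD, AMD64_RAX, 0x10, 1);          // add qword [rax+0x10], 1
 *   amd64_alu_membase_imm_size (code, X86_ADD, AMD64_RAX, 0x10, 1, 4);  // add dword [rax+0x10], 1
 *   amd64_test_reg_reg (code, AMD64_RDX, AMD64_RDX);                    // test rdx, rdx
 *
 * Both spellings expand to the same amd64_*_size macro, so they can be mixed
 * freely within one emission sequence.
 */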
#define amd64_shift_membase_imm(inst,opc,basereg,disp,imm) amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,8) #define amd64_shift_reg(inst,opc,reg) amd64_shift_reg_size(inst,opc,reg,8) #define amd64_shift_mem(inst,opc,mem) amd64_shift_mem_size(inst,opc,mem,8) #define amd64_shift_membase(inst,opc,basereg,disp) amd64_shift_membase_size(inst,opc,basereg,disp,8) #define amd64_shrd_reg(inst,dreg,reg) amd64_shrd_reg_size(inst,dreg,reg,8) #define amd64_shrd_reg_imm(inst,dreg,reg,shamt) amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,8) #define amd64_shld_reg(inst,dreg,reg) amd64_shld_reg_size(inst,dreg,reg,8) #define amd64_shld_reg_imm(inst,dreg,reg,shamt) amd64_shld_reg_imm_size(inst,dreg,reg,shamt,8) #define amd64_mul_reg(inst,reg,is_signed) amd64_mul_reg_size(inst,reg,is_signed,8) #define amd64_mul_mem(inst,mem,is_signed) amd64_mul_mem_size(inst,mem,is_signed,8) #define amd64_mul_membase(inst,basereg,disp,is_signed) amd64_mul_membase_size(inst,basereg,disp,is_signed,8) #define amd64_imul_reg_reg(inst,dreg,reg) amd64_imul_reg_reg_size(inst,dreg,reg,8) #define amd64_imul_reg_mem(inst,reg,mem) amd64_imul_reg_mem_size(inst,reg,mem,8) #define amd64_imul_reg_membase(inst,reg,basereg,disp) amd64_imul_reg_membase_size(inst,reg,basereg,disp,8) #define amd64_imul_reg_reg_imm(inst,dreg,reg,imm) amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,8) #define amd64_imul_reg_mem_imm(inst,reg,mem,imm) amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,8) #define amd64_imul_reg_membase_imm(inst,reg,basereg,disp,imm) amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,8) #define amd64_div_reg(inst,reg,is_signed) amd64_div_reg_size(inst,reg,is_signed,8) #define amd64_div_mem(inst,mem,is_signed) amd64_div_mem_size(inst,mem,is_signed,8) #define amd64_div_membase(inst,basereg,disp,is_signed) amd64_div_membase_size(inst,basereg,disp,is_signed,8) //#define amd64_mov_mem_reg(inst,mem,reg,size) amd64_mov_mem_reg_size(inst,mem,reg,size) //#define amd64_mov_regp_reg(inst,regp,reg,size) amd64_mov_regp_reg_size(inst,regp,reg,size) //#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) #define amd64_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) //#define amd64_mov_reg_reg(inst,dreg,reg,size) amd64_mov_reg_reg_size(inst,dreg,reg,size) //#define amd64_mov_reg_mem(inst,reg,mem,size) amd64_mov_reg_mem_size(inst,reg,mem,size) //#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) #define amd64_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) #define amd64_clear_reg(inst,reg) amd64_clear_reg_size(inst,reg,8) //#define amd64_mov_reg_imm(inst,reg,imm) amd64_mov_reg_imm_size(inst,reg,imm,8) #define amd64_mov_mem_imm(inst,mem,imm,size) amd64_mov_mem_imm_size(inst,mem,imm,size) //#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) #define amd64_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) #define amd64_lea_mem(inst,reg,mem) amd64_lea_mem_size(inst,reg,mem,8) //#define amd64_lea_membase(inst,reg,basereg,disp) amd64_lea_membase_size(inst,reg,basereg,disp,8) #define amd64_lea_memindex(inst,reg,basereg,disp,indexreg,shift) amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,8) #define 
amd64_widen_reg(inst,dreg,reg,is_signed,is_half) amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,8) #define amd64_widen_mem(inst,dreg,mem,is_signed,is_half) amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,8) #define amd64_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,8) #define amd64_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,8) #define amd64_cdq(inst) amd64_cdq_size(inst,8) #define amd64_wait(inst) amd64_wait_size(inst,8) #define amd64_fp_op_mem(inst,opc,mem,is_double) amd64_fp_op_mem_size(inst,opc,mem,is_double,8) #define amd64_fp_op_membase(inst,opc,basereg,disp,is_double) amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,8) #define amd64_fp_op(inst,opc,index) amd64_fp_op_size(inst,opc,index,8) #define amd64_fp_op_reg(inst,opc,index,pop_stack) amd64_fp_op_reg_size(inst,opc,index,pop_stack,8) #define amd64_fp_int_op_membase(inst,opc,basereg,disp,is_int) amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,8) #define amd64_fstp(inst,index) amd64_fstp_size(inst,index,8) #define amd64_fcompp(inst) amd64_fcompp_size(inst,8) #define amd64_fucompp(inst) amd64_fucompp_size(inst,8) #define amd64_fnstsw(inst) amd64_fnstsw_size(inst,8) #define amd64_fnstcw(inst,mem) amd64_fnstcw_size(inst,mem,8) #define amd64_fnstcw_membase(inst,basereg,disp) amd64_fnstcw_membase_size(inst,basereg,disp,8) #define amd64_fldcw(inst,mem) amd64_fldcw_size(inst,mem,8) #define amd64_fldcw_membase(inst,basereg,disp) amd64_fldcw_membase_size(inst,basereg,disp,8) #define amd64_fchs(inst) amd64_fchs_size(inst,8) #define amd64_frem(inst) amd64_frem_size(inst,8) #define amd64_fxch(inst,index) amd64_fxch_size(inst,index,8) #define amd64_fcomi(inst,index) amd64_fcomi_size(inst,index,8) #define amd64_fcomip(inst,index) amd64_fcomip_size(inst,index,8) #define amd64_fucomi(inst,index) amd64_fucomi_size(inst,index,8) #define amd64_fucomip(inst,index) amd64_fucomip_size(inst,index,8) #define amd64_fld(inst,mem,is_double) amd64_fld_size(inst,mem,is_double,8) #define amd64_fld_membase(inst,basereg,disp,is_double) amd64_fld_membase_size(inst,basereg,disp,is_double,8) #define amd64_fld80_mem(inst,mem) amd64_fld80_mem_size(inst,mem,8) #define amd64_fld80_membase(inst,basereg,disp) amd64_fld80_membase_size(inst,basereg,disp,8) #define amd64_fild(inst,mem,is_long) amd64_fild_size(inst,mem,is_long,8) #define amd64_fild_membase(inst,basereg,disp,is_long) amd64_fild_membase_size(inst,basereg,disp,is_long,8) #define amd64_fld_reg(inst,index) amd64_fld_reg_size(inst,index,8) #define amd64_fldz(inst) amd64_fldz_size(inst,8) #define amd64_fld1(inst) amd64_fld1_size(inst,8) #define amd64_fldpi(inst) amd64_fldpi_size(inst,8) #define amd64_fst(inst,mem,is_double,pop_stack) amd64_fst_size(inst,mem,is_double,pop_stack,8) #define amd64_fst_membase(inst,basereg,disp,is_double,pop_stack) amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,8) #define amd64_fst80_mem(inst,mem) amd64_fst80_mem_size(inst,mem,8) #define amd64_fst80_membase(inst,basereg,disp) amd64_fst80_membase_size(inst,basereg,disp,8) #define amd64_fist_pop(inst,mem,is_long) amd64_fist_pop_size(inst,mem,is_long,8) #define amd64_fist_pop_membase(inst,basereg,disp,is_long) amd64_fist_pop_membase_size(inst,basereg,disp,is_long,8) #define amd64_fstsw(inst) amd64_fstsw_size(inst,8) #define amd64_fist_membase(inst,basereg,disp,is_int) 
amd64_fist_membase_size(inst,basereg,disp,is_int,8) //#define amd64_push_reg(inst,reg) amd64_push_reg_size(inst,reg,8) #define amd64_push_regp(inst,reg) amd64_push_regp_size(inst,reg,8) #define amd64_push_mem(inst,mem) amd64_push_mem_size(inst,mem,8) //#define amd64_push_membase(inst,basereg,disp) amd64_push_membase_size(inst,basereg,disp,8) #define amd64_push_memindex(inst,basereg,disp,indexreg,shift) amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,8) #define amd64_push_imm(inst,imm) amd64_push_imm_size(inst,imm,8) //#define amd64_pop_reg(inst,reg) amd64_pop_reg_size(inst,reg,8) #define amd64_pop_mem(inst,mem) amd64_pop_mem_size(inst,mem,8) #define amd64_pop_membase(inst,basereg,disp) amd64_pop_membase_size(inst,basereg,disp,8) #define amd64_pushad(inst) amd64_pushad_size(inst,8) #define amd64_pushfd(inst) amd64_pushfd_size(inst,8) #define amd64_popad(inst) amd64_popad_size(inst,8) #define amd64_popfd(inst) amd64_popfd_size(inst,8) #define amd64_loop(inst,imm) amd64_loop_size(inst,imm,8) #define amd64_loope(inst,imm) amd64_loope_size(inst,imm,8) #define amd64_loopne(inst,imm) amd64_loopne_size(inst,imm,8) #define amd64_jump32(inst,imm) amd64_jump32_size(inst,imm,8) #define amd64_jump8(inst,imm) amd64_jump8_size(inst,imm,8) #define amd64_jump_reg(inst,reg) amd64_jump_reg_size(inst,reg,8) #define amd64_jump_mem(inst,mem) amd64_jump_mem_size(inst,mem,8) #define amd64_jump_membase(inst,basereg,disp) amd64_jump_membase_size(inst,basereg,disp,8) #define amd64_jump_code(inst,target) amd64_jump_code_size(inst,target,8) #define amd64_jump_disp(inst,disp) amd64_jump_disp_size(inst,disp,8) #define amd64_branch8(inst,cond,imm,is_signed) amd64_branch8_size(inst,cond,imm,is_signed,8) #define amd64_branch32(inst,cond,imm,is_signed) amd64_branch32_size(inst,cond,imm,is_signed,8) #define amd64_branch(inst,cond,target,is_signed) amd64_branch_size(inst,cond,target,is_signed,8) #define amd64_branch_disp(inst,cond,disp,is_signed) amd64_branch_disp_size(inst,cond,disp,is_signed,8) #define amd64_set_reg(inst,cond,reg,is_signed) amd64_set_reg_size(inst,cond,reg,is_signed,8) #define amd64_set_mem(inst,cond,mem,is_signed) amd64_set_mem_size(inst,cond,mem,is_signed,8) #define amd64_set_membase(inst,cond,basereg,disp,is_signed) amd64_set_membase_size(inst,cond,basereg,disp,is_signed,8) #define amd64_call_imm(inst,disp) amd64_call_imm_size(inst,disp,8) //#define amd64_call_reg(inst,reg) amd64_call_reg_size(inst,reg,8) #define amd64_call_mem(inst,mem) amd64_call_mem_size(inst,mem,8) #define amd64_call_membase(inst,basereg,disp) amd64_call_membase_size(inst,basereg,disp,8) #define amd64_call_code(inst,target) amd64_call_code_size(inst,target,8) //#define amd64_ret(inst) amd64_ret_size(inst,8) #define amd64_ret_imm(inst,imm) amd64_ret_imm_size(inst,imm,8) #define amd64_cmov_reg(inst,cond,is_signed,dreg,reg) amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,8) #define amd64_cmov_mem(inst,cond,is_signed,reg,mem) amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,8) #define amd64_cmov_membase(inst,cond,is_signed,reg,basereg,disp) amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,8) #define amd64_enter(inst,framesize) amd64_enter_size(inst,framesize) //#define amd64_leave(inst) amd64_leave_size(inst,8) #define amd64_sahf(inst) amd64_sahf_size(inst,8) #define amd64_fsin(inst) amd64_fsin_size(inst,8) #define amd64_fcos(inst) amd64_fcos_size(inst,8) #define amd64_fabs(inst) amd64_fabs_size(inst,8) #define amd64_ftst(inst) amd64_ftst_size(inst,8) #define amd64_fxam(inst) amd64_fxam_size(inst,8) #define 
amd64_fpatan(inst) amd64_fpatan_size(inst,8)
#define amd64_fprem(inst) amd64_fprem_size(inst,8)
#define amd64_fprem1(inst) amd64_fprem1_size(inst,8)
#define amd64_frndint(inst) amd64_frndint_size(inst,8)
#define amd64_fsqrt(inst) amd64_fsqrt_size(inst,8)
#define amd64_fptan(inst) amd64_fptan_size(inst,8)
#define amd64_padding(inst,size) amd64_padding_size(inst,size)
#define amd64_prolog(inst,frame,reg_mask) amd64_prolog_size(inst,frame,reg_mask,8)
#define amd64_epilog(inst,reg_mask) amd64_epilog_size(inst,reg_mask,8)

#endif // AMD64_H
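/*
 * End-to-end usage sketch, assuming the macros above and a caller-provided buffer
 * that is already mapped writable and executable (allocation and mprotect are
 * outside this header's scope). amd64_mov_reg_imm selects a 4- or 8-byte
 * immediate encoding by itself, based on whether the value fits in 32 bits.
 */
static inline guint8*
example_emit_return_constant (guint8 *code, gint64 value)
{
	/* mov rax, value; ret  -- i.e. a tiny function that returns 'value' */
	amd64_mov_reg_imm (code, AMD64_RAX, value);
	amd64_ret (code);
	return code;
}
/* Typical call site: remember the start pointer before emitting, then cast the
 * start of the (executable) buffer to a function pointer once emission is done. */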
/*
 * amd64-codegen.h: Macros for generating amd64 code
 *
 * Authors:
 *   Paolo Molaro ([email protected])
 *   Intel Corporation (ORP Project)
 *   Sergey Chaban ([email protected])
 *   Dietmar Maurer ([email protected])
 *   Patrik Torstensson
 *   Zalman Stern
 *
 * Copyright (C) 2000 Intel Corporation. All rights reserved.
 * Copyright (C) 2001, 2002 Ximian, Inc.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#ifndef AMD64_H
#define AMD64_H

// Conventions in this file:
//	body: implementation. other macros call this one
//	disp: displacement
//	inst: instruction
//	is_half: short if true, byte if false (then why is it named is_half...?)
//	imm: immediate
//	mem: read from (immediate-supplied address?)
//	membase: read from address in a base register plus a displacement
//	memindex: SIB addressing: (address in base register) + (displacement in index register)<<(shift)
//	reg: register, encode modR/M bits 00
//	regp: register, encode modR/M bits 11
//	size: Expected 1,2,4 or 8
//	widen: extends from 1 or 2 bytes

#include <glib.h>

typedef enum {
	AMD64_RAX = 0,
	AMD64_RCX = 1,
	AMD64_RDX = 2,
	AMD64_RBX = 3,
	AMD64_RSP = 4,
	AMD64_RBP = 5,
	AMD64_RSI = 6,
	AMD64_RDI = 7,
	AMD64_R8 = 8,
	AMD64_R9 = 9,
	AMD64_R10 = 10,
	AMD64_R11 = 11,
	AMD64_R12 = 12,
	AMD64_R13 = 13,
	AMD64_R14 = 14,
	AMD64_R15 = 15,
	AMD64_RIP = 16,
	AMD64_NREG
} AMD64_Reg_No;

typedef enum {
	AMD64_XMM0 = 0,
	AMD64_XMM1 = 1,
	AMD64_XMM2 = 2,
	AMD64_XMM3 = 3,
	AMD64_XMM4 = 4,
	AMD64_XMM5 = 5,
	AMD64_XMM6 = 6,
	AMD64_XMM7 = 7,
	AMD64_XMM8 = 8,
	AMD64_XMM9 = 9,
	AMD64_XMM10 = 10,
	AMD64_XMM11 = 11,
	AMD64_XMM12 = 12,
	AMD64_XMM13 = 13,
	AMD64_XMM14 = 14,
	AMD64_XMM15 = 15,
	AMD64_XMM_NREG = 16,
} AMD64_XMM_Reg_No;

typedef enum {
	AMD64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */
	AMD64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */
	AMD64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */
	AMD64_REX_W = 8  /* Operation is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */
} AMD64_REX_Bits;

#define amd64_codegen_pre(inst)
#define amd64_codegen_post(inst)

#ifdef TARGET_WIN32
#define AMD64_ARG_REG1 AMD64_RCX
#define AMD64_ARG_REG2 AMD64_RDX
#define AMD64_ARG_REG3 AMD64_R8
#define AMD64_ARG_REG4 AMD64_R9
#else
#define AMD64_ARG_REG1 AMD64_RDI
#define AMD64_ARG_REG2 AMD64_RSI
#define AMD64_ARG_REG3 AMD64_RDX
#define AMD64_ARG_REG4 AMD64_RCX
#endif

#ifdef TARGET_WIN32
#define AMD64_CALLEE_REGS ((1<<AMD64_RAX) | (1<<AMD64_RCX) | (1<<AMD64_RDX) | (1<<AMD64_R8) | (1<<AMD64_R9) | (1<<AMD64_R10))
#define AMD64_IS_CALLEE_REG(reg) (AMD64_CALLEE_REGS & (1 << (reg)))
#define AMD64_ARGUMENT_REGS ((1<<AMD64_RDX) | (1<<AMD64_RCX) | (1<<AMD64_R8) | (1<<AMD64_R9))
#define AMD64_IS_ARGUMENT_REG(reg) (AMD64_ARGUMENT_REGS & (1 << (reg)))
/* xmm0-xmm3 for standard calling convention, additionally xmm4-xmm5 for __vectorcall (not currently used) */
#define AMD64_ARGUMENT_XREGS ((1<<AMD64_XMM0) | (1<<AMD64_XMM1) | (1<<AMD64_XMM2) | (1<<AMD64_XMM3) | (1<<AMD64_XMM4) | (1<<AMD64_XMM5))
#define AMD64_IS_ARGUMENT_XREG(reg) (AMD64_ARGUMENT_XREGS & (1 << (reg)))
#define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RDI) | (1<<AMD64_RSI) | (1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_R15) | (1<<AMD64_RBP))
#define AMD64_IS_CALLEE_SAVED_REG(reg) (AMD64_CALLEE_SAVED_REGS & (1 << (reg)))
#else
#define AMD64_CALLEE_REGS ((1<<AMD64_RAX) | (1<<AMD64_RCX) | (1<<AMD64_RDX) | (1<<AMD64_RSI) | (1<<AMD64_RDI) | (1<<AMD64_R8) | \
(1<<AMD64_R9) | (1<<AMD64_R10)) #define AMD64_IS_CALLEE_REG(reg) (AMD64_CALLEE_REGS & (1 << (reg))) #define AMD64_ARGUMENT_REGS ((1<<AMD64_RDI) | (1<<AMD64_RSI) | (1<<AMD64_RDX) | (1<<AMD64_RCX) | (1<<AMD64_R8) | (1<<AMD64_R9)) #define AMD64_IS_ARGUMENT_REG(reg) (AMD64_ARGUMENT_REGS & (1 << (reg))) #define AMD64_ARGUMENT_XREGS ((1<<AMD64_XMM0) | (1<<AMD64_XMM1) | (1<<AMD64_XMM2) | (1<<AMD64_XMM3) | (1<<AMD64_XMM4) | (1<<AMD64_XMM5) | (1<<AMD64_XMM6) | (1<<AMD64_XMM7)) #define AMD64_IS_ARGUMENT_XREG(reg) (AMD64_ARGUMENT_XREGS & (1 << (reg))) #define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_R15) | (1<<AMD64_RBP)) #define AMD64_IS_CALLEE_SAVED_REG(reg) (AMD64_CALLEE_SAVED_REGS & (1 << (reg))) #endif #define AMD64_REX(bits) ((unsigned char)(0x40 | (bits))) #define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \ { \ unsigned char _amd64_rex_bits = \ (((width) > 4) ? AMD64_REX_W : 0) | \ (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \ (((reg_index) > 7) ? AMD64_REX_X : 0) | \ (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \ if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ } while (0) typedef union { guint64 val; unsigned char b [8]; } amd64_imm_buf; #include "../x86/x86-codegen.h" /* In 64 bit mode, all registers have a low byte subregister */ #undef X86_IS_BYTE_REG #define X86_IS_BYTE_REG(reg) 1 #define amd64_modrm_mod(modrm) ((modrm) >> 6) #define amd64_modrm_reg(modrm) (((modrm) >> 3) & 0x7) #define amd64_modrm_rm(modrm) ((modrm) & 0x7) #define amd64_rex_r(rex) ((((rex) >> 2) & 0x1) << 3) #define amd64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3) #define amd64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3) #define amd64_sib_scale(sib) ((sib) >> 6) #define amd64_sib_index(sib) (((sib) >> 3) & 0x7) #define amd64_sib_base(sib) ((sib) & 0x7) #define amd64_is_imm32(val) ((gint64)val >= -((gint64)1<<31) && (gint64)val <= (((gint64)1<<31)-1)) #define x86_imm_emit64(inst,imm) \ do { \ amd64_imm_buf imb; \ imb.val = (guint64) (imm); \ *(inst)++ = imb.b [0]; \ *(inst)++ = imb.b [1]; \ *(inst)++ = imb.b [2]; \ *(inst)++ = imb.b [3]; \ *(inst)++ = imb.b [4]; \ *(inst)++ = imb.b [5]; \ *(inst)++ = imb.b [6]; \ *(inst)++ = imb.b [7]; \ } while (0) #define amd64_membase_emit(inst,reg,basereg,disp) do { \ if ((basereg) == AMD64_RIP) { \ x86_address_byte ((inst), 0, (reg)&0x7, 5); \ x86_imm_emit32 ((inst), (disp)); \ } \ else \ x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \ } while (0) #define amd64_alu_reg_imm_size_body(inst,opc,reg,imm,size) \ do { \ if (x86_is_imm8((imm))) { \ amd64_emit_rex(inst, size, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x83; \ x86_reg_emit ((inst), (opc), (reg)); \ x86_imm_emit8 ((inst), (imm)); \ } else if ((reg) == AMD64_RAX) { \ amd64_emit_rex(inst, size, 0, 0, 0); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \ x86_imm_emit32 ((inst), (imm)); \ } else { \ amd64_emit_rex(inst, size, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x81; \ x86_reg_emit ((inst), (opc), (reg)); \ x86_imm_emit32 ((inst), (imm)); \ } \ } while (0) #define amd64_alu_reg_reg_size_body(inst,opc,dreg,reg,size) \ do { \ amd64_emit_rex(inst, size, (dreg), 0, (reg)); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ x86_reg_emit ((inst), (dreg), (reg)); \ } while (0) #define amd64_test_reg_imm_size_body(inst,reg,imm,size) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst),(size),0,0,(reg)); \ if ((reg) == AMD64_RAX) { \ *(inst)++ = (unsigned char)0xa9; \ } \ else { \ 
*(inst)++ = (unsigned char)0xf7; \ x86_reg_emit((inst), 0, (reg)); \ } \ x86_imm_emit32((inst), (imm)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \ amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)) #define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)) #define amd64_test_reg_imm_size(inst, reg, imm, size) \ amd64_test_reg_imm_size_body(inst, reg, imm, size) #define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size((inst),(opc),(reg),(imm),8) #define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8) #define amd64_test_reg_imm(inst,reg,imm) amd64_test_reg_imm_size(inst,reg,imm,8) #define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst),(size),(reg),0,(basereg)); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ amd64_membase_emit (inst, reg, basereg, disp); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_regp_reg(inst,regp,reg,size) \ do { \ amd64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, (regp)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ x86_regp_emit ((inst), (reg), (regp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_membase_reg(inst,basereg,disp,reg,size) \ do { \ amd64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_mem_reg(inst,mem,reg,size) \ do { \ amd64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, 0); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ x86_address_byte ((inst), 0, (reg), 4); \ x86_address_byte ((inst), 0, 4, 5); \ x86_imm_emit32 ((inst), (mem)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_reg_reg(inst,dreg,reg,size) \ do { \ amd64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (dreg), 0, (reg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ x86_reg_emit ((inst), (dreg), (reg)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_reg_mem_body(inst,reg,mem,size) \ do { \ amd64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, 0); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ x86_address_byte ((inst), 0, (reg), 4); \ x86_address_byte ((inst), 0, 4, 5); \ x86_imm_emit32 ((inst), (mem)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_reg_mem(inst,reg,mem,size) \ do { \ amd64_mov_reg_mem_body((inst),(reg),(mem),(size)); \ } while (0) 
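/*
 * Illustrative sketch of the mov/alu emitters defined above (X86_ADD is assumed to
 * come from x86-codegen.h). In these macros the trailing size argument selects the
 * operand width (1, 2, 4 or 8); it drives the REX.W bit and, for 2-byte operands,
 * the 0x66 operand-size prefix. Register choices here are arbitrary.
 */
static inline guint8*
example_emit_add_args (guint8 *code)
{
	/* rax = arg1 + arg2, then spill the 8-byte result to [rsp + 8] */
	amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	amd64_alu_reg_reg (code, X86_ADD, AMD64_RAX, AMD64_ARG_REG2);
	amd64_mov_membase_reg (code, AMD64_RSP, 8, AMD64_RAX, 8);
	return code;
}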
#define amd64_mov_reg_membase_body(inst,reg,basereg,disp,size) \ do { \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) #define amd64_mov_reg_memindex_size_body(inst,reg,basereg,disp,indexreg,shift,size) \ do { \ amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); \ x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); \ } while (0) #define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ amd64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size)) #define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ do { \ amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ } while (0) #define amd64_movzx_reg_membase(inst,reg,basereg,disp,size) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb6; break; \ case 2: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb7; break; \ case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movsxd_reg_mem(inst,reg,mem) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst,8,(reg),0,0); \ *(inst)++ = (unsigned char)0x63; \ x86_mem_emit ((inst), ((reg)&0x7), (mem)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movsxd_reg_membase(inst,reg,basereg,disp) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst,8,(reg),0,(basereg)); \ *(inst)++ = (unsigned char)0x63; \ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movsxd_reg_reg(inst,dreg,reg) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst,8,(dreg),0,(reg)); \ *(inst)++ = (unsigned char)0x63; \ x86_reg_emit ((inst), (dreg), (reg)); \ amd64_codegen_post(inst); \ } while (0) /* Pretty much the only instruction that supports a 64-bit immediate. Optimize for common case of * 32-bit immediate. Pepper with casts to avoid warnings. */ #define amd64_mov_reg_imm_size(inst,reg,imm,size) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst, (size), 0, 0, (reg)); \ *(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \ if ((size) == 8) \ x86_imm_emit64 ((inst), (guint64)(imm)); \ else \ x86_imm_emit32 ((inst), (int)(guint64)(imm)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_mov_reg_imm(inst,reg,imm) \ do { \ amd64_codegen_pre(inst); \ amd64_mov_reg_imm_size ((inst), (reg), (imm), (amd64_is_imm32 (((gint64)imm)) ? 4 : 8)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_set_reg_template(inst,reg) amd64_mov_reg_imm_size ((inst),(reg), 0, 8) #define amd64_set_template(inst,reg) amd64_set_reg_template((inst),(reg)) #define amd64_mov_membase_imm(inst,basereg,disp,imm,size) \ do { \ amd64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size) == 1 ? 
0 : (size), 0, 0, (basereg)); \ if ((size) == 1) { \ *(inst)++ = (unsigned char)0xc6; \ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ x86_imm_emit8 ((inst), (imm)); \ } else if ((size) == 2) { \ *(inst)++ = (unsigned char)0xc7; \ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ x86_imm_emit16 ((inst), (imm)); \ } else { \ *(inst)++ = (unsigned char)0xc7; \ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ x86_imm_emit32 ((inst), (imm)); \ } \ amd64_codegen_post(inst); \ } while (0) #define amd64_lea_membase_body(inst,reg,basereg,disp,width) \ do { \ amd64_emit_rex(inst, width, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x8d; \ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) #define amd64_lea4_membase(inst,reg,basereg,disp) \ amd64_lea_membase_body((inst), (reg), (basereg), (disp), 4) #define amd64_lea_membase(inst,reg,basereg,disp) \ amd64_lea_membase_body((inst), (reg), (basereg), (disp), 8) /* Instruction are implicitly 64-bits so don't generate REX for just the size. */ #define amd64_push_reg(inst,reg) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst, 0, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x50 + ((reg) & 0x7); \ amd64_codegen_post(inst); \ } while (0) /* Instruction is implicitly 64-bits so don't generate REX for just the size. */ #define amd64_push_membase(inst,basereg,disp) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst, 0, 0, 0, (basereg)); \ *(inst)++ = (unsigned char)0xff; \ x86_membase_emit ((inst), 6, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_pop_reg_body(inst,reg) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex(inst, 0, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x58 + ((reg) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_call_reg(inst,reg) \ do { \ amd64_emit_rex(inst, 0, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0xff; \ x86_reg_emit ((inst), 2, ((reg) & 0x7)); \ } while (0) #define amd64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0) #define amd64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0) #define amd64_pop_reg(inst,reg) amd64_pop_reg_body((inst), (reg)) #define amd64_movsd_reg_regp(inst,reg,regp) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf2); \ amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x10; \ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movsd_regp_reg(inst,regp,reg) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf2); \ amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x11; \ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movss_reg_regp(inst,reg,regp) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf3); \ amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x10; \ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movss_regp_reg(inst,regp,reg) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf3); \ amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x11; \ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movdqu_reg_membase(inst,reg,basereg,disp) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 
0xf3); \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x6f; \ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movsd_reg_membase(inst,reg,basereg,disp) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf2); \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x10; \ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movss_reg_membase(inst,reg,basereg,disp) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf3); \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x10; \ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movdqu_membase_reg(inst,basereg,disp,reg) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf3); \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x7f; \ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movsd_membase_reg(inst,basereg,disp,reg) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf2); \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x11; \ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) #define amd64_movss_membase_reg(inst,basereg,disp,reg) \ do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), 0xf3); \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x11; \ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ amd64_codegen_post(inst); \ } while (0) /* The original inc_reg opcode is used as the REX prefix */ #define amd64_inc_reg_size(inst,reg,size) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst),(size),0,0,(reg)); \ *(inst)++ = (unsigned char)0xff; \ x86_reg_emit ((inst),0,(reg) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_dec_reg_size(inst,reg,size) \ do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst),(size),0,0,(reg)); \ *(inst)++ = (unsigned char)0xff; \ x86_reg_emit ((inst),1,(reg) & 0x7); \ amd64_codegen_post(inst); \ } while (0) #define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst),0,0,0,(basereg)); \ *(inst)++ = (is_double) ? 
(unsigned char)0xdd : (unsigned char)0xd9; \ amd64_membase_emit ((inst), 0, (basereg), (disp)); \ amd64_codegen_post(inst); \ } while (0) /* From the AMD64 Software Optimization Manual */ #define amd64_padding_size(inst,size) \ do { \ switch ((size)) { \ case 1: *(inst)++ = 0x90; break; \ case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ case 3: *(inst)++ = 0x66; *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ default: amd64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \ }; \ } while (0) #define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst),2, (basereg),(disp)); } while (0) #define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst), 4, (basereg), (disp)); } while (0) #define amd64_jump_code_size(inst,target,size) do { \ if (amd64_is_imm32 ((gint64)(target) - (gint64)(inst))) { \ x86_jump_code((inst),(target)); \ } else { \ amd64_jump_membase ((inst), AMD64_RIP, 0); \ *(guint64*)(inst) = (guint64)(target); \ (inst) += 8; \ } \ } while (0) /* * SSE */ //TODO Reorganize SSE opcode defines. /* Two opcode SSE defines */ #define emit_sse_reg_reg_op2_size(inst,dreg,reg,op1,op2,size) do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ *(inst)++ = (unsigned char)(op1); \ *(inst)++ = (unsigned char)(op2); \ x86_reg_emit ((inst), (dreg), (reg)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_reg_reg_op2(inst,dreg,reg,op1,op2) emit_sse_reg_reg_op2_size ((inst), (dreg), (reg), (op1), (op2), 0) #define emit_sse_reg_reg_op2_imm(inst,dreg,reg,op1,op2,imm) do { \ amd64_codegen_pre(inst); \ emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \ x86_imm_emit8 ((inst), (imm)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_membase_reg_op2(inst,basereg,disp,reg,op1,op2) do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)(op1); \ *(inst)++ = (unsigned char)(op2); \ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_reg_membase_op2(inst,dreg,basereg,disp,op1,op2) do { \ amd64_codegen_pre(inst); \ amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 
0 : (basereg)); \ *(inst)++ = (unsigned char)(op1); \ *(inst)++ = (unsigned char)(op2); \ amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ amd64_codegen_post(inst); \ } while (0) /* Three opcode SSE defines */ #define emit_opcode3(inst,op1,op2,op3) do { \ *(inst)++ = (unsigned char)(op1); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ } while (0) #define emit_sse_reg_reg_size(inst,dreg,reg,op1,op2,op3,size) do { \ amd64_codegen_pre(inst); \ *(inst)++ = (unsigned char)(op1); \ amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ x86_reg_emit ((inst), (dreg), (reg)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0) #define emit_sse_reg_reg_imm(inst,dreg,reg,op1,op2,op3,imm) do { \ amd64_codegen_pre(inst); \ emit_sse_reg_reg ((inst), (dreg), (reg), (op1), (op2), (op3)); \ x86_imm_emit8 ((inst), (imm)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), (unsigned char)(op1)); \ amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_reg_membase(inst,dreg,basereg,disp,op1,op2,op3) do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), (unsigned char)(op1)); \ amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ amd64_codegen_post(inst); \ } while (0) /* Four opcode SSE defines */ #define emit_sse_reg_reg_op4_size(inst,dreg,reg,op1,op2,op3,op4,size) do { \ amd64_codegen_pre(inst); \ x86_prefix((inst), (unsigned char)(op1)); \ amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ *(inst)++ = (unsigned char)(op4); \ x86_reg_emit ((inst), (dreg), (reg)); \ amd64_codegen_post(inst); \ } while (0) #define emit_sse_reg_reg_op4(inst,dreg,reg,op1,op2,op3,op4) emit_sse_reg_reg_op4_size ((inst), (dreg), (reg), (op1), (op2), (op3), (op4), 0) #define emit_sse_reg_reg_op4_imm(inst,dreg,reg,op1,op2,op3,op4,imm) do { \ amd64_codegen_pre(inst); \ emit_sse_reg_reg_op4 ((inst), (dreg), (reg), (op1), (op2), (op3), (op4)); \ x86_imm_emit8 ((inst), (imm)); \ amd64_codegen_post(inst); \ } while (0) /* specific SSE opcode defines */ #define amd64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57) #define amd64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57) #define amd64_sse_andpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x54) #define amd64_sse_movsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x10) #define amd64_sse_movss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x10) #define amd64_sse_movsd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf2, 0x0f, 0x10) #define amd64_sse_movsd_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf2, 0x0f, 0x11) #define 
amd64_sse_movss_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf3, 0x0f, 0x11) #define amd64_sse_movss_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf3, 0x0f, 0x10) #define amd64_sse_comisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2f) #define amd64_sse_comiss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x67,0x0f,0x2f) #define amd64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f) #define amd64_sse_ucomisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2e) #define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8) #define amd64_sse_cvtss2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2d, 8) #define amd64_sse_cvttsd2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, (size)) #define amd64_sse_cvtss2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2c, (size)) #define amd64_sse_cvttsd2si_reg_reg(inst,dreg,reg) amd64_sse_cvttsd2si_reg_reg_size ((inst), (dreg), (reg), 8) #define amd64_sse_cvtsi2sd_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, (size)) #define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2sd_reg_reg_size ((inst), (dreg), (reg), 8) #define amd64_sse_cvtsi2ss_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2a, (size)) #define amd64_sse_cvtsi2ss_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2ss_reg_reg_size ((inst), (dreg), (reg), 8) #define amd64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a) #define amd64_sse_cvtss2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5a) #define amd64_sse_addsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x58) #define amd64_sse_addss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x58) #define amd64_sse_subsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5c) #define amd64_sse_subss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5c) #define amd64_sse_mulsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x59) #define amd64_sse_mulss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x59) #define amd64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e) #define amd64_sse_divss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5e) #define amd64_sse_sqrtsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x51) #define amd64_sse_pinsrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc4, (imm)) #define amd64_sse_pextrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc5, (imm)) #define amd64_sse_cvttsd2si_reg_xreg_size(inst,reg,xreg,size) emit_sse_reg_reg_size ((inst), (reg), (xreg), 0xf2, 0x0f, 0x2c, (size)) #define amd64_sse_addps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58) #define amd64_sse_divps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 
0x5e) #define amd64_sse_mulps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59) #define amd64_sse_subps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c) #define amd64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f) #define amd64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d) #define amd64_sse_cmpps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc2, (imm)) #define amd64_sse_andps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x54) #define amd64_sse_andnps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x55) #define amd64_sse_orps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x56) #define amd64_sse_xorps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x57) #define amd64_sse_sqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x51) #define amd64_sse_rsqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x52) #define amd64_sse_rcpps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x53) #define amd64_sse_addsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0xd0) #define amd64_sse_haddps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7c) #define amd64_sse_hsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7d) #define amd64_sse_movshdup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x16) #define amd64_sse_movsldup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x12) #define amd64_sse_pshufhw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf3, 0x0f, 0x70, (imm)) #define amd64_sse_pshuflw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf2, 0x0f, 0x70, (imm)) #define amd64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm)) #define amd64_sse_shufps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xC6, (imm)) #define amd64_sse_shufpd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xC6, (imm)) #define amd64_sse_roundpd_reg_reg_imm(inst, dreg, reg, imm) emit_sse_reg_reg_op4_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x3a, 0x09, (imm)) #define amd64_sse_addpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x58) #define amd64_sse_divpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5e) #define amd64_sse_mulpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x59) #define amd64_sse_subpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5c) #define amd64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f) #define amd64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d) #define amd64_sse_cmppd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc2, (imm)) #define amd64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x54) #define amd64_sse_andnpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x55) #define 
amd64_sse_orpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x56) #define amd64_sse_sqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x51) #define amd64_sse_rsqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x52) #define amd64_sse_rcppd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x53) #define amd64_sse_addsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd0) #define amd64_sse_haddpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7c) #define amd64_sse_hsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7d) #define amd64_sse_movddup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x12) #define amd64_sse_pmovmskb_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd7) #define amd64_sse_pand_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdb) #define amd64_sse_pandn_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdf) #define amd64_sse_por_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xeb) #define amd64_sse_pxor_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xef) #define amd64_sse_paddb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfc) #define amd64_sse_paddw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfd) #define amd64_sse_paddd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfe) #define amd64_sse_paddq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd4) #define amd64_sse_psubb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf8) #define amd64_sse_psubw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf9) #define amd64_sse_psubd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfa) #define amd64_sse_psubq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfb) #define amd64_sse_pmaxub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xde) #define amd64_sse_pmaxuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3e) #define amd64_sse_pmaxud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3f) #define amd64_sse_pmaxsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3c) #define amd64_sse_pmaxsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xee) #define amd64_sse_pmaxsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3d) #define amd64_sse_pavgb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe0) #define amd64_sse_pavgw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3) #define amd64_sse_pminub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xda) #define amd64_sse_pminuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3a) #define amd64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3b) #define amd64_sse_pminsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), 
(dreg), (reg), 0x66, 0x0f, 0x38, 0x38) #define amd64_sse_pminsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xea) #define amd64_sse_pminsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x39) #define amd64_sse_pcmpeqb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x74) #define amd64_sse_pcmpeqw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x75) #define amd64_sse_pcmpeqd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x76) #define amd64_sse_pcmpeqq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x29) #define amd64_sse_pcmpgtb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x64) #define amd64_sse_pcmpgtw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x65) #define amd64_sse_pcmpgtd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x66) #define amd64_sse_pcmpgtq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x37) #define amd64_sse_psadbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf6) #define amd64_sse_punpcklbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x60) #define amd64_sse_punpcklwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x61) #define amd64_sse_punpckldq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x62) #define amd64_sse_punpcklqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6c) #define amd64_sse_unpcklpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x14) #define amd64_sse_unpcklps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x14) #define amd64_sse_punpckhbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x68) #define amd64_sse_punpckhwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x69) #define amd64_sse_punpckhdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6a) #define amd64_sse_punpckhqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6d) #define amd64_sse_unpckhpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x15) #define amd64_sse_unpckhps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x15) #define amd64_sse_packsswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x63) #define amd64_sse_packssdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6b) #define amd64_sse_packuswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x67) #define amd64_sse_packusdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x2b) #define amd64_sse_paddusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdc) #define amd64_sse_psubusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8) #define amd64_sse_paddusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdd) #define amd64_sse_psubusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8) #define amd64_sse_paddsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), 
(dreg), (reg), 0x66, 0x0f, 0xec) #define amd64_sse_psubsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe8) #define amd64_sse_paddsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xed) #define amd64_sse_psubsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe9) #define amd64_sse_pmullw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd5) #define amd64_sse_pmulld_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x40) #define amd64_sse_pmuludq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf4) #define amd64_sse_pmulhuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe4) #define amd64_sse_pmulhw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe5) #define amd64_sse_psrlw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x71, (imm)) #define amd64_sse_psrlw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd1) #define amd64_sse_psraw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x71, (imm)) #define amd64_sse_psraw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe1) #define amd64_sse_psllw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x71, (imm)) #define amd64_sse_psllw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf1) #define amd64_sse_psrld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x72, (imm)) #define amd64_sse_psrld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd2) #define amd64_sse_psrad_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x72, (imm)) #define amd64_sse_psrad_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe2) #define amd64_sse_pslld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x72, (imm)) #define amd64_sse_pslld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf2) #define amd64_sse_psrlq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x73, (imm)) #define amd64_sse_psrlq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd3) #define amd64_sse_psraq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x73, (imm)) #define amd64_sse_psraq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3) #define amd64_sse_psllq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x73, (imm)) #define amd64_sse_psllq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf3) #define amd64_sse_cvtdq2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0xE6) #define amd64_sse_cvtdq2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5B) #define amd64_sse_cvtpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF2, 0x0F, 0xE6) #define amd64_sse_cvtpd2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5A) #define amd64_sse_cvtps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5B) #define 
amd64_sse_cvtps2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5A) #define amd64_sse_cvttpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0xE6) #define amd64_sse_cvttps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0x5B) #define amd64_movd_xreg_reg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (dreg), (sreg), 0x66, 0x0f, 0x6e, (size)) #define amd64_movd_reg_xreg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (sreg), (dreg), 0x66, 0x0f, 0x7e, (size)) #define amd64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e) #define amd64_movlhps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16) #define amd64_movhlps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12) #define amd64_sse_movups_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11) #define amd64_sse_movups_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10) #define amd64_sse_movaps_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29) #define amd64_sse_movaps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28) #define amd64_sse_movaps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28) #define amd64_sse_movntps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x2b) #define amd64_sse_prefetch_reg_membase(inst, arg, basereg, disp) emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18) #define amd64_sse_lzcnt_reg_reg_size(inst, dreg, reg, size) emit_sse_reg_reg_size((inst), (dreg), (reg), 0xf3, 0x0f, 0xbd, (size)) #define amd64_sse_popcnt_reg_reg_size(inst, dreg, reg, size) emit_sse_reg_reg_size((inst), (dreg), (reg), 0xf3, 0x0f, 0xb8, (size)) /* Generated from x86-codegen.h */ #define amd64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0) #define amd64_cld_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_cld(inst); amd64_codegen_post(inst); } while (0) #define amd64_stosb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); amd64_codegen_post(inst); } while (0) #define amd64_stosl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); amd64_codegen_post(inst); } while (0) #define amd64_stosd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); amd64_codegen_post(inst); } while (0) #define amd64_movsb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); amd64_codegen_post(inst); } while (0) #define amd64_movsl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); amd64_codegen_post(inst); } while (0) #define amd64_movsd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); amd64_codegen_post(inst); } while (0) #define amd64_prefix_size(inst,p,size) do { x86_prefix((inst), p); } while (0) #define amd64_rdtsc_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_rdtsc(inst); amd64_codegen_post(inst); } while (0) 
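/*
 * Scalar-double sketch using the SSE emitters above: convert an integer register,
 * add, then truncate back to an integer register. Register roles are arbitrary;
 * this only illustrates the macro call shapes, not a particular calling convention.
 */
static inline guint8*
example_emit_add_double (guint8 *code)
{
	/* xmm0 = (double) rdi; xmm0 += xmm1; rax = (long) xmm0 (truncating); ret */
	amd64_sse_cvtsi2sd_reg_reg (code, AMD64_XMM0, AMD64_RDI);
	amd64_sse_addsd_reg_reg (code, AMD64_XMM0, AMD64_XMM1);
	amd64_sse_cvttsd2si_reg_reg (code, AMD64_RAX, AMD64_XMM0);
	amd64_ret (code);
	return code;
}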
#define amd64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_xchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_xchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_inc_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_inc_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) //#define amd64_inc_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_dec_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_dec_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) //#define amd64_dec_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_not_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_not_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_not_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_neg_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_neg_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } 
while (0) #define amd64_neg_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_nop_size(inst,size) do { amd64_codegen_pre(inst); x86_nop(inst); amd64_codegen_post(inst); } while (0) //#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_alu_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_alu_mem_reg_size(inst,opc,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0) //#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); amd64_codegen_post(inst); } while (0) #define amd64_alu_reg_mem_size(inst,opc,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) //#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) //#define amd64_test_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_test_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_test_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_test_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define 
amd64_test_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_test_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_shift_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_shift_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_shift_reg_size(inst,opc,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_shift_mem_size(inst,opc,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_mem((inst),(opc),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_shift_membase_size(inst,opc,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_shrd_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0) #define amd64_shld_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0) #define amd64_mul_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_mul_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_mul_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_imul_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_imul_reg_mem_size(inst,reg,mem,size) do { 
amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_div_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_div_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_div_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_mov_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_regp_reg_size(inst,regp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 
4 : (size)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_clear_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_mov_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) //#define amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) #define amd64_lea_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) //#define amd64_lea_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0) #define amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) #define amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) #define amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) #define amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) #define amd64_cdq_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); amd64_codegen_post(inst); } while (0) #define 
amd64_wait_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); amd64_codegen_post(inst); } while (0) #define amd64_fp_op_mem_size(inst,opc,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); amd64_codegen_post(inst); } while (0) #define amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0) #define amd64_fp_op_size(inst,opc,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); amd64_codegen_post(inst); } while (0) #define amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0) #define amd64_fstp_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fcompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); amd64_codegen_post(inst); } while (0) #define amd64_fucompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); amd64_codegen_post(inst); } while (0) #define amd64_fnstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); amd64_codegen_post(inst); } while (0) #define amd64_fnstcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_fnstcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_fldcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_fldcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_fchs_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); amd64_codegen_post(inst); } while (0) #define amd64_frem_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_frem(inst); amd64_codegen_post(inst); } while (0) #define amd64_fxch_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fcomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fcomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fucomi_size(inst,index,size) do { amd64_codegen_pre(inst); 
amd64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fucomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fld_size(inst,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); amd64_codegen_post(inst); } while (0) //#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0) #define amd64_fld80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_fld80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_fild_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0) #define amd64_fild_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0) #define amd64_fld_reg_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); amd64_codegen_post(inst); } while (0) #define amd64_fldz_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); amd64_codegen_post(inst); } while (0) #define amd64_fld1_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); amd64_codegen_post(inst); } while (0) #define amd64_fldpi_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); amd64_codegen_post(inst); } while (0) #define amd64_fst_size(inst,mem,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0) #define amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0) #define amd64_fst80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_fst80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_fist_pop_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0) #define amd64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0) #define amd64_fstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); 
amd64_codegen_post(inst); } while (0) #define amd64_fist_membase_size(inst,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0) //#define amd64_push_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_push_regp_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_push_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) //#define amd64_push_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0) #define amd64_push_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); amd64_codegen_post(inst); } while (0) //#define amd64_pop_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_pop_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_pop_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_pushad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); amd64_codegen_post(inst); } while (0) #define amd64_pushfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); amd64_codegen_post(inst); } while (0) #define amd64_popad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); amd64_codegen_post(inst); } while (0) #define amd64_popfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); amd64_codegen_post(inst); } while (0) #define amd64_loop_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_loope_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_loopne_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_jump32_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_jump8_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); amd64_codegen_post(inst); } 
while (0) /* Defined above for Native Client, so they can be used in other macros */ #define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0) #define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0) #define amd64_jump_disp_size(inst,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) #define amd64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) #define amd64_branch_size_body(inst,cond,target,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0) #define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { amd64_codegen_pre(inst); x86_set_mem((inst),(cond),(mem),(is_signed)); amd64_codegen_post(inst); } while (0) #define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) //#define amd64_call_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_call_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0) #define amd64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0) //#define amd64_ret_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); amd64_codegen_post(inst); } while (0) #define amd64_ret_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) #define amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); 
x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_enter_size(inst,framesize) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_enter((inst),(framesize)); amd64_codegen_post(inst); } while (0) //#define amd64_leave_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); amd64_codegen_post(inst); } while (0) #define amd64_sahf_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); amd64_codegen_post(inst); } while (0) #define amd64_fsin_size(inst,size) do { amd64_codegen_pre(inst); x86_fsin(inst); amd64_codegen_post(inst); } while (0) #define amd64_fcos_size(inst,size) do { amd64_codegen_pre(inst); x86_fcos(inst); amd64_codegen_post(inst); } while (0) #define amd64_fabs_size(inst,size) do { amd64_codegen_pre(inst); x86_fabs(inst); amd64_codegen_post(inst); } while (0) #define amd64_ftst_size(inst,size) do { amd64_codegen_pre(inst); x86_ftst(inst); amd64_codegen_post(inst); } while (0) #define amd64_fxam_size(inst,size) do { amd64_codegen_pre(inst); x86_fxam(inst); amd64_codegen_post(inst); } while (0) #define amd64_fpatan_size(inst,size) do { amd64_codegen_pre(inst); x86_fpatan(inst); amd64_codegen_post(inst); } while (0) #define amd64_fprem_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem(inst); amd64_codegen_post(inst); } while (0) #define amd64_fprem1_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem1(inst); amd64_codegen_post(inst); } while (0) #define amd64_frndint_size(inst,size) do { amd64_codegen_pre(inst); x86_frndint(inst); amd64_codegen_post(inst); } while (0) #define amd64_fsqrt_size(inst,size) do { amd64_codegen_pre(inst); x86_fsqrt(inst); amd64_codegen_post(inst); } while (0) #define amd64_fptan_size(inst,size) do { amd64_codegen_pre(inst); x86_fptan(inst); amd64_codegen_post(inst); } while (0) //#define amd64_padding_size(inst,size) do { amd64_codegen_pre(inst); x86_padding((inst),(size)); amd64_codegen_post(inst); } while (0) #define amd64_prolog_size(inst,frame_size,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); amd64_codegen_post(inst); } while (0) #define amd64_epilog_size(inst,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); amd64_codegen_post(inst); } while (0) #define amd64_xadd_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); amd64_codegen_post(inst); } while (0) #define amd64_xadd_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); amd64_codegen_post(inst); } while (0) #define amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); amd64_codegen_post(inst); } while (0) #define amd64_breakpoint(inst) amd64_breakpoint_size(inst,8) #define amd64_cld(inst) amd64_cld_size(inst,8) #define amd64_stosb(inst) amd64_stosb_size(inst,8) #define amd64_stosl(inst) amd64_stosl_size(inst,8) #define amd64_stosd(inst) amd64_stosd_size(inst,8) #define amd64_movsb(inst) amd64_movsb_size(inst,8) #define amd64_movsl(inst) amd64_movsl_size(inst,8) #define amd64_movsd(inst) 
amd64_movsd_size(inst,8) #define amd64_prefix(inst,p) amd64_prefix_size(inst,p,8) #define amd64_rdtsc(inst) amd64_rdtsc_size(inst,8) #define amd64_cmpxchg_reg_reg(inst,dreg,reg) amd64_cmpxchg_reg_reg_size(inst,dreg,reg,8) #define amd64_cmpxchg_mem_reg(inst,mem,reg) amd64_cmpxchg_mem_reg_size(inst,mem,reg,8) #define amd64_cmpxchg_membase_reg(inst,basereg,disp,reg) amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,8) #define amd64_xchg_reg_reg(inst,dreg,reg,size) amd64_xchg_reg_reg_size(inst,dreg,reg,size) #define amd64_xchg_mem_reg(inst,mem,reg,size) amd64_xchg_mem_reg_size(inst,mem,reg,size) #define amd64_xchg_membase_reg(inst,basereg,disp,reg,size) amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) #define amd64_xadd_reg_reg(inst,dreg,reg,size) amd64_xadd_reg_reg_size(inst,dreg,reg,size) #define amd64_xadd_mem_reg(inst,mem,reg,size) amd64_xadd_mem_reg_size(inst,mem,reg,size) #define amd64_xadd_membase_reg(inst,basereg,disp,reg,size) amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) #define amd64_inc_mem(inst,mem) amd64_inc_mem_size(inst,mem,8) #define amd64_inc_membase(inst,basereg,disp) amd64_inc_membase_size(inst,basereg,disp,8) #define amd64_inc_reg(inst,reg) amd64_inc_reg_size(inst,reg,8) #define amd64_dec_mem(inst,mem) amd64_dec_mem_size(inst,mem,8) #define amd64_dec_membase(inst,basereg,disp) amd64_dec_membase_size(inst,basereg,disp,8) #define amd64_dec_reg(inst,reg) amd64_dec_reg_size(inst,reg,8) #define amd64_not_mem(inst,mem) amd64_not_mem_size(inst,mem,8) #define amd64_not_membase(inst,basereg,disp) amd64_not_membase_size(inst,basereg,disp,8) #define amd64_not_reg(inst,reg) amd64_not_reg_size(inst,reg,8) #define amd64_neg_mem(inst,mem) amd64_neg_mem_size(inst,mem,8) #define amd64_neg_membase(inst,basereg,disp) amd64_neg_membase_size(inst,basereg,disp,8) #define amd64_neg_reg(inst,reg) amd64_neg_reg_size(inst,reg,8) #define amd64_nop(inst) amd64_nop_size(inst,8) //#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size(inst,opc,reg,imm,8) #define amd64_alu_mem_imm(inst,opc,mem,imm) amd64_alu_mem_imm_size(inst,opc,mem,imm,8) #define amd64_alu_membase_imm(inst,opc,basereg,disp,imm) amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,8) #define amd64_alu_mem_reg(inst,opc,mem,reg) amd64_alu_mem_reg_size(inst,opc,mem,reg,8) #define amd64_alu_membase_reg(inst,opc,basereg,disp,reg) amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,8) //#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size(inst,opc,dreg,reg,8) #define amd64_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,8) #define amd64_alu_reg_mem(inst,opc,reg,mem) amd64_alu_reg_mem_size(inst,opc,reg,mem,8) #define amd64_alu_reg_membase(inst,opc,reg,basereg,disp) amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,8) //#define amd64_test_reg_imm(inst,reg,imm) amd64_test_reg_imm_size(inst,reg,imm,8) #define amd64_test_mem_imm(inst,mem,imm) amd64_test_mem_imm_size(inst,mem,imm,8) #define amd64_test_membase_imm(inst,basereg,disp,imm) amd64_test_membase_imm_size(inst,basereg,disp,imm,8) #define amd64_test_reg_reg(inst,dreg,reg) amd64_test_reg_reg_size(inst,dreg,reg,8) #define amd64_test_mem_reg(inst,mem,reg) amd64_test_mem_reg_size(inst,mem,reg,8) #define amd64_test_membase_reg(inst,basereg,disp,reg) amd64_test_membase_reg_size(inst,basereg,disp,reg,8) #define amd64_shift_reg_imm(inst,opc,reg,imm) amd64_shift_reg_imm_size(inst,opc,reg,imm,8) #define amd64_shift_mem_imm(inst,opc,mem,imm) amd64_shift_mem_imm_size(inst,opc,mem,imm,8) 
#define amd64_shift_membase_imm(inst,opc,basereg,disp,imm) amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,8) #define amd64_shift_reg(inst,opc,reg) amd64_shift_reg_size(inst,opc,reg,8) #define amd64_shift_mem(inst,opc,mem) amd64_shift_mem_size(inst,opc,mem,8) #define amd64_shift_membase(inst,opc,basereg,disp) amd64_shift_membase_size(inst,opc,basereg,disp,8) #define amd64_shrd_reg(inst,dreg,reg) amd64_shrd_reg_size(inst,dreg,reg,8) #define amd64_shrd_reg_imm(inst,dreg,reg,shamt) amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,8) #define amd64_shld_reg(inst,dreg,reg) amd64_shld_reg_size(inst,dreg,reg,8) #define amd64_shld_reg_imm(inst,dreg,reg,shamt) amd64_shld_reg_imm_size(inst,dreg,reg,shamt,8) #define amd64_mul_reg(inst,reg,is_signed) amd64_mul_reg_size(inst,reg,is_signed,8) #define amd64_mul_mem(inst,mem,is_signed) amd64_mul_mem_size(inst,mem,is_signed,8) #define amd64_mul_membase(inst,basereg,disp,is_signed) amd64_mul_membase_size(inst,basereg,disp,is_signed,8) #define amd64_imul_reg_reg(inst,dreg,reg) amd64_imul_reg_reg_size(inst,dreg,reg,8) #define amd64_imul_reg_mem(inst,reg,mem) amd64_imul_reg_mem_size(inst,reg,mem,8) #define amd64_imul_reg_membase(inst,reg,basereg,disp) amd64_imul_reg_membase_size(inst,reg,basereg,disp,8) #define amd64_imul_reg_reg_imm(inst,dreg,reg,imm) amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,8) #define amd64_imul_reg_mem_imm(inst,reg,mem,imm) amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,8) #define amd64_imul_reg_membase_imm(inst,reg,basereg,disp,imm) amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,8) #define amd64_div_reg(inst,reg,is_signed) amd64_div_reg_size(inst,reg,is_signed,8) #define amd64_div_mem(inst,mem,is_signed) amd64_div_mem_size(inst,mem,is_signed,8) #define amd64_div_membase(inst,basereg,disp,is_signed) amd64_div_membase_size(inst,basereg,disp,is_signed,8) //#define amd64_mov_mem_reg(inst,mem,reg,size) amd64_mov_mem_reg_size(inst,mem,reg,size) //#define amd64_mov_regp_reg(inst,regp,reg,size) amd64_mov_regp_reg_size(inst,regp,reg,size) //#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) #define amd64_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) //#define amd64_mov_reg_reg(inst,dreg,reg,size) amd64_mov_reg_reg_size(inst,dreg,reg,size) //#define amd64_mov_reg_mem(inst,reg,mem,size) amd64_mov_reg_mem_size(inst,reg,mem,size) //#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) #define amd64_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) #define amd64_clear_reg(inst,reg) amd64_clear_reg_size(inst,reg,8) //#define amd64_mov_reg_imm(inst,reg,imm) amd64_mov_reg_imm_size(inst,reg,imm,8) #define amd64_mov_mem_imm(inst,mem,imm,size) amd64_mov_mem_imm_size(inst,mem,imm,size) //#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) #define amd64_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) #define amd64_lea_mem(inst,reg,mem) amd64_lea_mem_size(inst,reg,mem,8) //#define amd64_lea_membase(inst,reg,basereg,disp) amd64_lea_membase_size(inst,reg,basereg,disp,8) #define amd64_lea_memindex(inst,reg,basereg,disp,indexreg,shift) amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,8) #define 
amd64_widen_reg(inst,dreg,reg,is_signed,is_half) amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,8) #define amd64_widen_mem(inst,dreg,mem,is_signed,is_half) amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,8) #define amd64_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,8) #define amd64_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,8) #define amd64_cdq(inst) amd64_cdq_size(inst,8) #define amd64_wait(inst) amd64_wait_size(inst,8) #define amd64_fp_op_mem(inst,opc,mem,is_double) amd64_fp_op_mem_size(inst,opc,mem,is_double,8) #define amd64_fp_op_membase(inst,opc,basereg,disp,is_double) amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,8) #define amd64_fp_op(inst,opc,index) amd64_fp_op_size(inst,opc,index,8) #define amd64_fp_op_reg(inst,opc,index,pop_stack) amd64_fp_op_reg_size(inst,opc,index,pop_stack,8) #define amd64_fp_int_op_membase(inst,opc,basereg,disp,is_int) amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,8) #define amd64_fstp(inst,index) amd64_fstp_size(inst,index,8) #define amd64_fcompp(inst) amd64_fcompp_size(inst,8) #define amd64_fucompp(inst) amd64_fucompp_size(inst,8) #define amd64_fnstsw(inst) amd64_fnstsw_size(inst,8) #define amd64_fnstcw(inst,mem) amd64_fnstcw_size(inst,mem,8) #define amd64_fnstcw_membase(inst,basereg,disp) amd64_fnstcw_membase_size(inst,basereg,disp,8) #define amd64_fldcw(inst,mem) amd64_fldcw_size(inst,mem,8) #define amd64_fldcw_membase(inst,basereg,disp) amd64_fldcw_membase_size(inst,basereg,disp,8) #define amd64_fchs(inst) amd64_fchs_size(inst,8) #define amd64_frem(inst) amd64_frem_size(inst,8) #define amd64_fxch(inst,index) amd64_fxch_size(inst,index,8) #define amd64_fcomi(inst,index) amd64_fcomi_size(inst,index,8) #define amd64_fcomip(inst,index) amd64_fcomip_size(inst,index,8) #define amd64_fucomi(inst,index) amd64_fucomi_size(inst,index,8) #define amd64_fucomip(inst,index) amd64_fucomip_size(inst,index,8) #define amd64_fld(inst,mem,is_double) amd64_fld_size(inst,mem,is_double,8) #define amd64_fld_membase(inst,basereg,disp,is_double) amd64_fld_membase_size(inst,basereg,disp,is_double,8) #define amd64_fld80_mem(inst,mem) amd64_fld80_mem_size(inst,mem,8) #define amd64_fld80_membase(inst,basereg,disp) amd64_fld80_membase_size(inst,basereg,disp,8) #define amd64_fild(inst,mem,is_long) amd64_fild_size(inst,mem,is_long,8) #define amd64_fild_membase(inst,basereg,disp,is_long) amd64_fild_membase_size(inst,basereg,disp,is_long,8) #define amd64_fld_reg(inst,index) amd64_fld_reg_size(inst,index,8) #define amd64_fldz(inst) amd64_fldz_size(inst,8) #define amd64_fld1(inst) amd64_fld1_size(inst,8) #define amd64_fldpi(inst) amd64_fldpi_size(inst,8) #define amd64_fst(inst,mem,is_double,pop_stack) amd64_fst_size(inst,mem,is_double,pop_stack,8) #define amd64_fst_membase(inst,basereg,disp,is_double,pop_stack) amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,8) #define amd64_fst80_mem(inst,mem) amd64_fst80_mem_size(inst,mem,8) #define amd64_fst80_membase(inst,basereg,disp) amd64_fst80_membase_size(inst,basereg,disp,8) #define amd64_fist_pop(inst,mem,is_long) amd64_fist_pop_size(inst,mem,is_long,8) #define amd64_fist_pop_membase(inst,basereg,disp,is_long) amd64_fist_pop_membase_size(inst,basereg,disp,is_long,8) #define amd64_fstsw(inst) amd64_fstsw_size(inst,8) #define amd64_fist_membase(inst,basereg,disp,is_int) 
amd64_fist_membase_size(inst,basereg,disp,is_int,8) //#define amd64_push_reg(inst,reg) amd64_push_reg_size(inst,reg,8) #define amd64_push_regp(inst,reg) amd64_push_regp_size(inst,reg,8) #define amd64_push_mem(inst,mem) amd64_push_mem_size(inst,mem,8) //#define amd64_push_membase(inst,basereg,disp) amd64_push_membase_size(inst,basereg,disp,8) #define amd64_push_memindex(inst,basereg,disp,indexreg,shift) amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,8) #define amd64_push_imm(inst,imm) amd64_push_imm_size(inst,imm,8) //#define amd64_pop_reg(inst,reg) amd64_pop_reg_size(inst,reg,8) #define amd64_pop_mem(inst,mem) amd64_pop_mem_size(inst,mem,8) #define amd64_pop_membase(inst,basereg,disp) amd64_pop_membase_size(inst,basereg,disp,8) #define amd64_pushad(inst) amd64_pushad_size(inst,8) #define amd64_pushfd(inst) amd64_pushfd_size(inst,8) #define amd64_popad(inst) amd64_popad_size(inst,8) #define amd64_popfd(inst) amd64_popfd_size(inst,8) #define amd64_loop(inst,imm) amd64_loop_size(inst,imm,8) #define amd64_loope(inst,imm) amd64_loope_size(inst,imm,8) #define amd64_loopne(inst,imm) amd64_loopne_size(inst,imm,8) #define amd64_jump32(inst,imm) amd64_jump32_size(inst,imm,8) #define amd64_jump8(inst,imm) amd64_jump8_size(inst,imm,8) #define amd64_jump_reg(inst,reg) amd64_jump_reg_size(inst,reg,8) #define amd64_jump_mem(inst,mem) amd64_jump_mem_size(inst,mem,8) #define amd64_jump_membase(inst,basereg,disp) amd64_jump_membase_size(inst,basereg,disp,8) #define amd64_jump_code(inst,target) amd64_jump_code_size(inst,target,8) #define amd64_jump_disp(inst,disp) amd64_jump_disp_size(inst,disp,8) #define amd64_branch8(inst,cond,imm,is_signed) amd64_branch8_size(inst,cond,imm,is_signed,8) #define amd64_branch32(inst,cond,imm,is_signed) amd64_branch32_size(inst,cond,imm,is_signed,8) #define amd64_branch(inst,cond,target,is_signed) amd64_branch_size(inst,cond,target,is_signed,8) #define amd64_branch_disp(inst,cond,disp,is_signed) amd64_branch_disp_size(inst,cond,disp,is_signed,8) #define amd64_set_reg(inst,cond,reg,is_signed) amd64_set_reg_size(inst,cond,reg,is_signed,8) #define amd64_set_mem(inst,cond,mem,is_signed) amd64_set_mem_size(inst,cond,mem,is_signed,8) #define amd64_set_membase(inst,cond,basereg,disp,is_signed) amd64_set_membase_size(inst,cond,basereg,disp,is_signed,8) #define amd64_call_imm(inst,disp) amd64_call_imm_size(inst,disp,8) //#define amd64_call_reg(inst,reg) amd64_call_reg_size(inst,reg,8) #define amd64_call_mem(inst,mem) amd64_call_mem_size(inst,mem,8) #define amd64_call_membase(inst,basereg,disp) amd64_call_membase_size(inst,basereg,disp,8) #define amd64_call_code(inst,target) amd64_call_code_size(inst,target,8) //#define amd64_ret(inst) amd64_ret_size(inst,8) #define amd64_ret_imm(inst,imm) amd64_ret_imm_size(inst,imm,8) #define amd64_cmov_reg(inst,cond,is_signed,dreg,reg) amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,8) #define amd64_cmov_mem(inst,cond,is_signed,reg,mem) amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,8) #define amd64_cmov_membase(inst,cond,is_signed,reg,basereg,disp) amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,8) #define amd64_enter(inst,framesize) amd64_enter_size(inst,framesize) //#define amd64_leave(inst) amd64_leave_size(inst,8) #define amd64_sahf(inst) amd64_sahf_size(inst,8) #define amd64_fsin(inst) amd64_fsin_size(inst,8) #define amd64_fcos(inst) amd64_fcos_size(inst,8) #define amd64_fabs(inst) amd64_fabs_size(inst,8) #define amd64_ftst(inst) amd64_ftst_size(inst,8) #define amd64_fxam(inst) amd64_fxam_size(inst,8) #define 
amd64_fpatan(inst) amd64_fpatan_size(inst,8) #define amd64_fprem(inst) amd64_fprem_size(inst,8) #define amd64_fprem1(inst) amd64_fprem1_size(inst,8) #define amd64_frndint(inst) amd64_frndint_size(inst,8) #define amd64_fsqrt(inst) amd64_fsqrt_size(inst,8) #define amd64_fptan(inst) amd64_fptan_size(inst,8) #define amd64_padding(inst,size) amd64_padding_size(inst,size) #define amd64_prolog(inst,frame,reg_mask) amd64_prolog_size(inst,frame,reg_mask,8) #define amd64_epilog(inst,reg_mask) amd64_epilog_size(inst,reg_mask,8) #endif // AMD64_H
-1
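The record above ends with the size-suffixed amd64 emitter macros and their unsized wrappers. As a minimal, illustrative sketch only (not part of the dotnet/runtime sources), the fragment below shows how those wrappers are typically composed: it uses only macros visible in the excerpt (amd64_clear_reg, amd64_test_reg_reg, amd64_ret_imm), and it assumes the header itself is included and that the AMD64_RAX register constant comes from the register enum defined earlier in the same header. The unsized wrappers expand to their _size counterparts with a default operand size of 8, which is what makes amd64_emit_rex add the REX.W prefix.

/* Illustrative sketch only -- assumes the amd64 codegen header above is included
   and that AMD64_RAX is provided by the register enum earlier in that header. */
static unsigned char *emit_clear_and_return (unsigned char *code)
{
    amd64_clear_reg (code, AMD64_RAX);               /* xor rax, rax (size defaults to 8, so REX.W is emitted) */
    amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX); /* test rax, rax */
    amd64_ret_imm (code, 0);                         /* ret 0 */
    return code;                                     /* each macro advances 'code' past the bytes it emitted */
}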
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/pal/tests/palsuite/c_runtime/qsort/test1/test1.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test1.c ** ** Purpose: Calls qsort to sort a buffer, and verifies that it has done ** the job correctly. ** ** **==========================================================================*/ #include <palsuite.h> int __cdecl charcmp_qsort_test1(const void *pa, const void *pb) { return memcmp(pa, pb, 1); } PALTEST(c_runtime_qsort_test1_paltest_qsort_test1, "c_runtime/qsort/test1/paltest_qsort_test1") { char before[] = "cgaiehdbjf"; const char after[] = "abcdefghij"; if (PAL_Initialize(argc, argv)) { return FAIL; } qsort(before, sizeof(before) - 1, sizeof(char), charcmp_qsort_test1); if (memcmp(before, after, sizeof(before)) != 0) { Fail("qsort did not correctly sort an array of characters.\n"); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test1.c ** ** Purpose: Calls qsort to sort a buffer, and verifies that it has done ** the job correctly. ** ** **==========================================================================*/ #include <palsuite.h> int __cdecl charcmp_qsort_test1(const void *pa, const void *pb) { return memcmp(pa, pb, 1); } PALTEST(c_runtime_qsort_test1_paltest_qsort_test1, "c_runtime/qsort/test1/paltest_qsort_test1") { char before[] = "cgaiehdbjf"; const char after[] = "abcdefghij"; if (PAL_Initialize(argc, argv)) { return FAIL; } qsort(before, sizeof(before) - 1, sizeof(char), charcmp_qsort_test1); if (memcmp(before, after, sizeof(before)) != 0) { Fail("qsort did not correctly sort an array of characters.\n"); } PAL_Terminate(); return PASS; }
-1
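The PAL test in the record above relies on the standard qsort comparator contract: the comparator receives pointers to elements and must return a negative, zero, or positive value, which memcmp over a single byte provides. The standalone program below is an illustrative sketch of the same idea outside the PAL harness; it is not part of the test suite.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Comparator: qsort passes pointers to the two elements being compared. */
static int charcmp (const void *pa, const void *pb)
{
    return memcmp (pa, pb, 1);
}

int main (void)
{
    char before[] = "cgaiehdbjf";
    qsort (before, sizeof (before) - 1, sizeof (char), charcmp); /* exclude the trailing NUL */
    printf ("%s\n", before); /* prints abcdefghij */
    return 0;
}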
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/native/libs/System.Security.Cryptography.Native.Apple/pal_x509.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_digest.h" #include "pal_seckey.h" #include "pal_compiler.h" #include <pal_x509_types.h> #include <Security/Security.h> /* Given a handle, determine if it represents a SecCertificateRef, SecIdentityRef, or other. If the handle is a certificate or identity it is CFRetain()ed (and must later be CFRelease()d). Returns 1 if the handle was a certificate or identity, 0 otherwise (other values on invalid state). Output: pCertOut: If handle is a certificate, receives handle, otherwise NULL pIdentityut: If handle is an identity, receives handle, otherwise NULL */ PALEXPORT int32_t AppleCryptoNative_X509DemuxAndRetainHandle(CFTypeRef handle, SecCertificateRef* pCertOut, SecIdentityRef* pIdentityOut); /* Extract a SecKeyRef for the public key from the certificate handle. Returns 1 on success, 0 on failure, any other value on invalid state. Output: pPublicKeyOut: Receives a CFRetain()ed SecKeyRef for the public key pOSStatusOut: Receives the result of SecCertificateCopyKey or SecCertificateCopyPublicKey, depending on the OS version. */ PALEXPORT int32_t AppleCryptoNative_X509GetPublicKey(SecCertificateRef cert, SecKeyRef* pPublicKeyOut, int32_t* pOSStatusOut); /* Determines the data type of the provided input. Returns the data (format) type of the provided input, PAL_X509Unknown if it cannot be determined. */ PALEXPORT PAL_X509ContentType AppleCryptoNative_X509GetContentType(uint8_t* pbData, int32_t cbData); /* Extract a SecCertificateRef for the certificate from an identity handle. Returns the result of SecIdentityCopyCertificate. Output: pCertOut: Receives a SecCertificateRef for the certificate associated with the identity */ PALEXPORT int32_t AppleCryptoNative_X509CopyCertFromIdentity(SecIdentityRef identity, SecCertificateRef* pCertOut); /* Extract a SecKeyRef for the private key from an identity handle. Returns the result of SecIdentityCopyPrivateKey Output: pPrivateKeyOut: Receives a SecKeyRef for the private key associated with the identity */ PALEXPORT int32_t AppleCryptoNative_X509CopyPrivateKeyFromIdentity(SecIdentityRef identity, SecKeyRef* pPrivateKeyOut); /* Extract the DER encoded value of a certificate (public portion only). Returns 1 on success, 0 on failure, any other value indicates invalid state. Output: ppDataOut: Receives a CFDataRef with the exported blob pOSStatus: Receives the result of SecItemExport */ PALEXPORT int32_t AppleCryptoNative_X509GetRawData(SecCertificateRef cert, CFDataRef* ppDataOut, int32_t* pOSStatus); /* Extract a string that contains a human-readable summary of the contents of the certificate Returns 1 on success, 0 on failure, any other value indicates invalid state. Output: ppSummaryOut: Receives a CFDataRef with the exported blob */ PALEXPORT int32_t AppleCryptoNative_X509GetSubjectSummary(SecCertificateRef cert, CFStringRef* ppSummaryOut);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_digest.h" #include "pal_seckey.h" #include "pal_compiler.h" #include <pal_x509_types.h> #include <Security/Security.h> /* Given a handle, determine if it represents a SecCertificateRef, SecIdentityRef, or other. If the handle is a certificate or identity it is CFRetain()ed (and must later be CFRelease()d). Returns 1 if the handle was a certificate or identity, 0 otherwise (other values on invalid state). Output: pCertOut: If handle is a certificate, receives handle, otherwise NULL pIdentityut: If handle is an identity, receives handle, otherwise NULL */ PALEXPORT int32_t AppleCryptoNative_X509DemuxAndRetainHandle(CFTypeRef handle, SecCertificateRef* pCertOut, SecIdentityRef* pIdentityOut); /* Extract a SecKeyRef for the public key from the certificate handle. Returns 1 on success, 0 on failure, any other value on invalid state. Output: pPublicKeyOut: Receives a CFRetain()ed SecKeyRef for the public key pOSStatusOut: Receives the result of SecCertificateCopyKey or SecCertificateCopyPublicKey, depending on the OS version. */ PALEXPORT int32_t AppleCryptoNative_X509GetPublicKey(SecCertificateRef cert, SecKeyRef* pPublicKeyOut, int32_t* pOSStatusOut); /* Determines the data type of the provided input. Returns the data (format) type of the provided input, PAL_X509Unknown if it cannot be determined. */ PALEXPORT PAL_X509ContentType AppleCryptoNative_X509GetContentType(uint8_t* pbData, int32_t cbData); /* Extract a SecCertificateRef for the certificate from an identity handle. Returns the result of SecIdentityCopyCertificate. Output: pCertOut: Receives a SecCertificateRef for the certificate associated with the identity */ PALEXPORT int32_t AppleCryptoNative_X509CopyCertFromIdentity(SecIdentityRef identity, SecCertificateRef* pCertOut); /* Extract a SecKeyRef for the private key from an identity handle. Returns the result of SecIdentityCopyPrivateKey Output: pPrivateKeyOut: Receives a SecKeyRef for the private key associated with the identity */ PALEXPORT int32_t AppleCryptoNative_X509CopyPrivateKeyFromIdentity(SecIdentityRef identity, SecKeyRef* pPrivateKeyOut); /* Extract the DER encoded value of a certificate (public portion only). Returns 1 on success, 0 on failure, any other value indicates invalid state. Output: ppDataOut: Receives a CFDataRef with the exported blob pOSStatus: Receives the result of SecItemExport */ PALEXPORT int32_t AppleCryptoNative_X509GetRawData(SecCertificateRef cert, CFDataRef* ppDataOut, int32_t* pOSStatus); /* Extract a string that contains a human-readable summary of the contents of the certificate Returns 1 on success, 0 on failure, any other value indicates invalid state. Output: ppSummaryOut: Receives a CFDataRef with the exported blob */ PALEXPORT int32_t AppleCryptoNative_X509GetSubjectSummary(SecCertificateRef cert, CFStringRef* ppSummaryOut);
-1
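The header in the record above documents a consistent calling convention for the Apple crypto shim: 1 means success, 0 means failure (other values indicate invalid state), pOSStatusOut receives the underlying SecCertificateCopyKey/SecCertificateCopyPublicKey status, and the returned SecKeyRef is a retained reference the caller must CFRelease. The sketch below is illustrative only and is not part of the dotnet/runtime sources; the include path for the header is an assumption.

#include <stdint.h>
#include <CoreFoundation/CoreFoundation.h>
#include <Security/Security.h>
#include "pal_x509.h" /* assumed include path for the declarations shown above */

/* Returns a retained SecKeyRef (caller must CFRelease it) or NULL on failure. */
static SecKeyRef copy_public_key (SecCertificateRef cert, int32_t *osStatus)
{
    SecKeyRef publicKey = NULL;

    if (AppleCryptoNative_X509GetPublicKey (cert, &publicKey, osStatus) != 1)
    {
        /* Anything other than 1 indicates failure or invalid state; *osStatus holds the OS error. */
        return NULL;
    }

    return publicKey;
}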
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/pal/tests/palsuite/threading/QueueUserAPC/test1/test1.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test1.c ** ** Purpose: Tests that APCs sent to a thread in an alertable state via ** QueueUserAPC are executed in FIFO order. Also tests that the APC ** function is executed within the context of the correct thread and ** that the dwData parameter gets sent correctly. ** ** **===================================================================*/ #include <palsuite.h> const int ChildThreadSleepTime = 2000; const int InterruptTime = 1000; VOID PALAPI APCFuncA(ULONG_PTR dwParam); VOID PALAPI APCFuncB(ULONG_PTR dwParam); VOID PALAPI APCFuncC(ULONG_PTR dwParam); VOID PALAPI APCFuncD(ULONG_PTR dwParam); DWORD PALAPI SleeperProc_QueueUserAPC_test1(LPVOID lpParameter); const char *ExpectedResults_QueueUserAPC_test1 = "A0B0C0D0A1B1C1D1A2B2C2D2A3B3C3D3"; char ResultBuffer_QueueUserAPC_test1[256]; char *ResultPtr_QueueUserAPC_test1; DWORD ChildThread_QueueUserAPC_test1; /* synchronization events */ static HANDLE hSyncEvent1_QueueUserAPC_test1 = NULL; static HANDLE hSyncEvent2_QueueUserAPC_test1 = NULL; /* thread result because we have no GetExitCodeThread() API */ BOOL bThreadResult_QueueUserAPC_test1 = FAIL; PALTEST(threading_QueueUserAPC_test1_paltest_queueuserapc_test1, "threading/QueueUserAPC/test1/paltest_queueuserapc_test1") { HANDLE hThread = NULL; int ret; int i,j; BOOL bResult = FAIL; PAPCFUNC APCFuncs[] = { APCFuncA, APCFuncB, APCFuncC, APCFuncD, }; /* initialize the PAL */ if (0 != (PAL_Initialize(argc, argv))) { return FAIL; } ResultPtr_QueueUserAPC_test1 = ResultBuffer_QueueUserAPC_test1; /* create a pair of synchronization events to coordinate our threads */ hSyncEvent1_QueueUserAPC_test1 = CreateEvent( NULL, FALSE, FALSE, NULL ); if( hSyncEvent1_QueueUserAPC_test1 == NULL ) { Trace( "ERROR:%lu:CreateEvent() call failed\n", GetLastError() ); goto cleanup; } hSyncEvent2_QueueUserAPC_test1 = CreateEvent( NULL, FALSE, FALSE, NULL ); if( hSyncEvent2_QueueUserAPC_test1 == NULL ) { Trace( "ERROR:%lu:CreateEvent() call failed\n", GetLastError() ); goto cleanup; } /* create a child thread which will call SleepEx */ hThread = CreateThread( NULL, 0, (LPTHREAD_START_ROUTINE)SleeperProc_QueueUserAPC_test1, 0, 0, &ChildThread_QueueUserAPC_test1); if( hThread == NULL ) { Trace( "ERROR:%lu:CreateThread() call failed\n", GetLastError()); goto cleanup; } /* wait on our synchronization event to ensure the thread is running */ ret = WaitForSingleObject( hSyncEvent1_QueueUserAPC_test1, 20000 ); if( ret != WAIT_OBJECT_0 ) { Trace( "ERROR:WaitForSingleObject() returned %lu, " "expected WAIT_OBJECT_0\n", ret ); goto cleanup; } /* queue our user APC functions on the thread */ for (i=0; i<4; i++) { for (j=0; j<sizeof(APCFuncs)/sizeof(APCFuncs[0]); j++) { ret = QueueUserAPC(APCFuncs[j], hThread, '0' + i); if (ret == 0) { Trace( "ERROR:%lu:QueueUserAPC() call failed\n", GetLastError()); goto cleanup; } } } /* signal the child thread to continue */ if( ! 
SetEvent( hSyncEvent2_QueueUserAPC_test1 ) ) { Trace( "ERROR:%lu:SetEvent() call failed\n", GetLastError() ); goto cleanup; } /* wait on our synchronization event to ensure the other thread is done */ ret = WaitForSingleObject( hSyncEvent1_QueueUserAPC_test1, 20000 ); if( ret != WAIT_OBJECT_0 ) { Trace( "ERROR:WaitForSingleObject() returned %lu, " "expected WAIT_OBJECT_0\n", ret ); goto cleanup; } /* check that the thread executed successfully */ if( bThreadResult_QueueUserAPC_test1 == FAIL ) { goto cleanup; } /* check the result buffer */ if (strcmp(ExpectedResults_QueueUserAPC_test1, ResultBuffer_QueueUserAPC_test1) != 0) { Trace( "FAIL:Expected the APC function calls to produce a result of " " \"%s\", got \"%s\"\n", ExpectedResults_QueueUserAPC_test1, ResultBuffer_QueueUserAPC_test1 ); goto cleanup; } /* success if we get here */ bResult = PASS; cleanup: /* wait for the other thread to finish */ if( hThread != NULL ) { ret = WaitForSingleObject( hThread, INFINITE ); if (ret == WAIT_FAILED) { Trace( "ERROR:%lu:WaitForSingleObject() returned %lu, " "expected WAIT_OBJECT_0\n", ret ); bResult = FAIL; } } /* close our synchronization handles */ if( hSyncEvent1_QueueUserAPC_test1 != NULL ) { if( ! CloseHandle( hSyncEvent1_QueueUserAPC_test1 ) ) { Trace( "ERROR:%lu:CloseHandle() call failed\n", GetLastError() ); bResult = FAIL; } } if( hSyncEvent2_QueueUserAPC_test1 != NULL ) { if( ! CloseHandle( hSyncEvent2_QueueUserAPC_test1 ) ) { Trace( "ERROR:%lu:CloseHandle() call failed\n", GetLastError() ); bResult = FAIL; } } if( bResult == FAIL ) { Fail( "test failed\n" ); } /* terminate the PAL */ PAL_Terminate(); /* return success */ return PASS; } VOID PALAPI APCFuncA(ULONG_PTR dwParam) { char val = (int) dwParam; if (GetCurrentThreadId() != ChildThread_QueueUserAPC_test1) { Fail("Executing APC in thread %d, should be in %d!\n", GetCurrentThreadId(), ChildThread_QueueUserAPC_test1); } *ResultPtr_QueueUserAPC_test1++ = 'A'; *ResultPtr_QueueUserAPC_test1++ = val; *ResultPtr_QueueUserAPC_test1 = 0; } VOID PALAPI APCFuncB(ULONG_PTR dwParam) { char val = (int) dwParam; if (GetCurrentThreadId() != ChildThread_QueueUserAPC_test1) { Fail("Executing APC in thread %d, should be in %d!\n", GetCurrentThreadId(), ChildThread_QueueUserAPC_test1); } *ResultPtr_QueueUserAPC_test1++ = 'B'; *ResultPtr_QueueUserAPC_test1++ = val; *ResultPtr_QueueUserAPC_test1 = 0; } VOID PALAPI APCFuncC(ULONG_PTR dwParam) { char val = (int) dwParam; if (GetCurrentThreadId() != ChildThread_QueueUserAPC_test1) { Fail("Executing APC in thread %d, should be in %d!\n", GetCurrentThreadId(), ChildThread_QueueUserAPC_test1); } *ResultPtr_QueueUserAPC_test1++ = 'C'; *ResultPtr_QueueUserAPC_test1++ = val; *ResultPtr_QueueUserAPC_test1 = 0; } VOID PALAPI APCFuncD(ULONG_PTR dwParam) { char val = (int) dwParam; if (GetCurrentThreadId() != ChildThread_QueueUserAPC_test1) { Fail("Executing APC in thread %d, should be in %d!\n", GetCurrentThreadId(), ChildThread_QueueUserAPC_test1); } *ResultPtr_QueueUserAPC_test1++ = 'D'; *ResultPtr_QueueUserAPC_test1++ = val; *ResultPtr_QueueUserAPC_test1 = 0; } /* Entry Point for child thread. All it does is call SleepEx. */ DWORD PALAPI SleeperProc_QueueUserAPC_test1(LPVOID lpParameter) { DWORD ret; /* signal the main thread that we're ready to proceed */ if( ! 
SetEvent( hSyncEvent1_QueueUserAPC_test1 ) ) { Trace( "ERROR:%lu:SetEvent() call failed\n", GetLastError() ); bThreadResult_QueueUserAPC_test1 = FAIL; goto done; } /* wait for notification from the main thread */ ret = WaitForSingleObject( hSyncEvent2_QueueUserAPC_test1, 20000 ); if( ret != WAIT_OBJECT_0 ) { Trace( "ERROR:WaitForSingleObject() returned %lu, " "expected WAIT_OBJECT_0\n", ret ); bThreadResult_QueueUserAPC_test1 = FAIL; goto done; } /* call SleepEx to activate any queued APCs */ ret = SleepEx(ChildThreadSleepTime, TRUE); if (ret != WAIT_IO_COMPLETION) { Trace( "ERROR:SleepEx() call returned %lu, " "expected WAIT_IO_COMPLETION\n", ret ); bThreadResult_QueueUserAPC_test1 = FAIL; goto done; } /* everything passed here */ bThreadResult_QueueUserAPC_test1 = PASS; done: /* signal the main thread that we're finished */ if( ! SetEvent( hSyncEvent1_QueueUserAPC_test1 ) ) { Trace( "ERROR:%lu:SetEvent() call failed\n", GetLastError() ); bThreadResult_QueueUserAPC_test1 = FAIL; } /* return success or failure */ return bThreadResult_QueueUserAPC_test1; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test1.c ** ** Purpose: Tests that APCs sent to a thread in an alertable state via ** QueueUserAPC are executed in FIFO order. Also tests that the APC ** function is executed within the context of the correct thread and ** that the dwData parameter gets sent correctly. ** ** **===================================================================*/ #include <palsuite.h> const int ChildThreadSleepTime = 2000; const int InterruptTime = 1000; VOID PALAPI APCFuncA(ULONG_PTR dwParam); VOID PALAPI APCFuncB(ULONG_PTR dwParam); VOID PALAPI APCFuncC(ULONG_PTR dwParam); VOID PALAPI APCFuncD(ULONG_PTR dwParam); DWORD PALAPI SleeperProc_QueueUserAPC_test1(LPVOID lpParameter); const char *ExpectedResults_QueueUserAPC_test1 = "A0B0C0D0A1B1C1D1A2B2C2D2A3B3C3D3"; char ResultBuffer_QueueUserAPC_test1[256]; char *ResultPtr_QueueUserAPC_test1; DWORD ChildThread_QueueUserAPC_test1; /* synchronization events */ static HANDLE hSyncEvent1_QueueUserAPC_test1 = NULL; static HANDLE hSyncEvent2_QueueUserAPC_test1 = NULL; /* thread result because we have no GetExitCodeThread() API */ BOOL bThreadResult_QueueUserAPC_test1 = FAIL; PALTEST(threading_QueueUserAPC_test1_paltest_queueuserapc_test1, "threading/QueueUserAPC/test1/paltest_queueuserapc_test1") { HANDLE hThread = NULL; int ret; int i,j; BOOL bResult = FAIL; PAPCFUNC APCFuncs[] = { APCFuncA, APCFuncB, APCFuncC, APCFuncD, }; /* initialize the PAL */ if (0 != (PAL_Initialize(argc, argv))) { return FAIL; } ResultPtr_QueueUserAPC_test1 = ResultBuffer_QueueUserAPC_test1; /* create a pair of synchronization events to coordinate our threads */ hSyncEvent1_QueueUserAPC_test1 = CreateEvent( NULL, FALSE, FALSE, NULL ); if( hSyncEvent1_QueueUserAPC_test1 == NULL ) { Trace( "ERROR:%lu:CreateEvent() call failed\n", GetLastError() ); goto cleanup; } hSyncEvent2_QueueUserAPC_test1 = CreateEvent( NULL, FALSE, FALSE, NULL ); if( hSyncEvent2_QueueUserAPC_test1 == NULL ) { Trace( "ERROR:%lu:CreateEvent() call failed\n", GetLastError() ); goto cleanup; } /* create a child thread which will call SleepEx */ hThread = CreateThread( NULL, 0, (LPTHREAD_START_ROUTINE)SleeperProc_QueueUserAPC_test1, 0, 0, &ChildThread_QueueUserAPC_test1); if( hThread == NULL ) { Trace( "ERROR:%lu:CreateThread() call failed\n", GetLastError()); goto cleanup; } /* wait on our synchronization event to ensure the thread is running */ ret = WaitForSingleObject( hSyncEvent1_QueueUserAPC_test1, 20000 ); if( ret != WAIT_OBJECT_0 ) { Trace( "ERROR:WaitForSingleObject() returned %lu, " "expected WAIT_OBJECT_0\n", ret ); goto cleanup; } /* queue our user APC functions on the thread */ for (i=0; i<4; i++) { for (j=0; j<sizeof(APCFuncs)/sizeof(APCFuncs[0]); j++) { ret = QueueUserAPC(APCFuncs[j], hThread, '0' + i); if (ret == 0) { Trace( "ERROR:%lu:QueueUserAPC() call failed\n", GetLastError()); goto cleanup; } } } /* signal the child thread to continue */ if( ! 
SetEvent( hSyncEvent2_QueueUserAPC_test1 ) ) { Trace( "ERROR:%lu:SetEvent() call failed\n", GetLastError() ); goto cleanup; } /* wait on our synchronization event to ensure the other thread is done */ ret = WaitForSingleObject( hSyncEvent1_QueueUserAPC_test1, 20000 ); if( ret != WAIT_OBJECT_0 ) { Trace( "ERROR:WaitForSingleObject() returned %lu, " "expected WAIT_OBJECT_0\n", ret ); goto cleanup; } /* check that the thread executed successfully */ if( bThreadResult_QueueUserAPC_test1 == FAIL ) { goto cleanup; } /* check the result buffer */ if (strcmp(ExpectedResults_QueueUserAPC_test1, ResultBuffer_QueueUserAPC_test1) != 0) { Trace( "FAIL:Expected the APC function calls to produce a result of " " \"%s\", got \"%s\"\n", ExpectedResults_QueueUserAPC_test1, ResultBuffer_QueueUserAPC_test1 ); goto cleanup; } /* success if we get here */ bResult = PASS; cleanup: /* wait for the other thread to finish */ if( hThread != NULL ) { ret = WaitForSingleObject( hThread, INFINITE ); if (ret == WAIT_FAILED) { Trace( "ERROR:%lu:WaitForSingleObject() returned %lu, " "expected WAIT_OBJECT_0\n", ret ); bResult = FAIL; } } /* close our synchronization handles */ if( hSyncEvent1_QueueUserAPC_test1 != NULL ) { if( ! CloseHandle( hSyncEvent1_QueueUserAPC_test1 ) ) { Trace( "ERROR:%lu:CloseHandle() call failed\n", GetLastError() ); bResult = FAIL; } } if( hSyncEvent2_QueueUserAPC_test1 != NULL ) { if( ! CloseHandle( hSyncEvent2_QueueUserAPC_test1 ) ) { Trace( "ERROR:%lu:CloseHandle() call failed\n", GetLastError() ); bResult = FAIL; } } if( bResult == FAIL ) { Fail( "test failed\n" ); } /* terminate the PAL */ PAL_Terminate(); /* return success */ return PASS; } VOID PALAPI APCFuncA(ULONG_PTR dwParam) { char val = (int) dwParam; if (GetCurrentThreadId() != ChildThread_QueueUserAPC_test1) { Fail("Executing APC in thread %d, should be in %d!\n", GetCurrentThreadId(), ChildThread_QueueUserAPC_test1); } *ResultPtr_QueueUserAPC_test1++ = 'A'; *ResultPtr_QueueUserAPC_test1++ = val; *ResultPtr_QueueUserAPC_test1 = 0; } VOID PALAPI APCFuncB(ULONG_PTR dwParam) { char val = (int) dwParam; if (GetCurrentThreadId() != ChildThread_QueueUserAPC_test1) { Fail("Executing APC in thread %d, should be in %d!\n", GetCurrentThreadId(), ChildThread_QueueUserAPC_test1); } *ResultPtr_QueueUserAPC_test1++ = 'B'; *ResultPtr_QueueUserAPC_test1++ = val; *ResultPtr_QueueUserAPC_test1 = 0; } VOID PALAPI APCFuncC(ULONG_PTR dwParam) { char val = (int) dwParam; if (GetCurrentThreadId() != ChildThread_QueueUserAPC_test1) { Fail("Executing APC in thread %d, should be in %d!\n", GetCurrentThreadId(), ChildThread_QueueUserAPC_test1); } *ResultPtr_QueueUserAPC_test1++ = 'C'; *ResultPtr_QueueUserAPC_test1++ = val; *ResultPtr_QueueUserAPC_test1 = 0; } VOID PALAPI APCFuncD(ULONG_PTR dwParam) { char val = (int) dwParam; if (GetCurrentThreadId() != ChildThread_QueueUserAPC_test1) { Fail("Executing APC in thread %d, should be in %d!\n", GetCurrentThreadId(), ChildThread_QueueUserAPC_test1); } *ResultPtr_QueueUserAPC_test1++ = 'D'; *ResultPtr_QueueUserAPC_test1++ = val; *ResultPtr_QueueUserAPC_test1 = 0; } /* Entry Point for child thread. All it does is call SleepEx. */ DWORD PALAPI SleeperProc_QueueUserAPC_test1(LPVOID lpParameter) { DWORD ret; /* signal the main thread that we're ready to proceed */ if( ! 
SetEvent( hSyncEvent1_QueueUserAPC_test1 ) ) { Trace( "ERROR:%lu:SetEvent() call failed\n", GetLastError() ); bThreadResult_QueueUserAPC_test1 = FAIL; goto done; } /* wait for notification from the main thread */ ret = WaitForSingleObject( hSyncEvent2_QueueUserAPC_test1, 20000 ); if( ret != WAIT_OBJECT_0 ) { Trace( "ERROR:WaitForSingleObject() returned %lu, " "expected WAIT_OBJECT_0\n", ret ); bThreadResult_QueueUserAPC_test1 = FAIL; goto done; } /* call SleepEx to activate any queued APCs */ ret = SleepEx(ChildThreadSleepTime, TRUE); if (ret != WAIT_IO_COMPLETION) { Trace( "ERROR:SleepEx() call returned %lu, " "expected WAIT_IO_COMPLETION\n", ret ); bThreadResult_QueueUserAPC_test1 = FAIL; goto done; } /* everything passed here */ bThreadResult_QueueUserAPC_test1 = PASS; done: /* signal the main thread that we're finished */ if( ! SetEvent( hSyncEvent1_QueueUserAPC_test1 ) ) { Trace( "ERROR:%lu:SetEvent() call failed\n", GetLastError() ); bThreadResult_QueueUserAPC_test1 = FAIL; } /* return success or failure */ return bThreadResult_QueueUserAPC_test1; }
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/vm/inlinetracking.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ============================================================================================= // Code for tracking method inlinings in NGen and R2R images. // The only information stored is "who" got inlined "where", no offsets or inlining depth tracking. // (No good for debugger yet.) // This information is later exposed to profilers and can be useful for ReJIT. // Runtime inlining is not being tracked because profilers can deduce it via callbacks anyway. // ============================================================================================= #include "common.h" #include "inlinetracking.h" #include "ceeload.h" #include "versionresilienthashcode.h" using namespace NativeFormat; #ifndef DACCESS_COMPILE bool MethodInModule::operator <(const MethodInModule& other) const { STANDARD_VM_CONTRACT; if (m_module == other.m_module) { return m_methodDef < other.m_methodDef; } else { // Since NGen images are supposed to be determenistic, // we need stable sort order that isn't changing between different runs // That's why we use names and GUIDs instead of just doing m_module < other.m_module // First we try to compare simple names (should be fast enough) LPCUTF8 simpleName = m_module ? m_module->GetSimpleName() : ""; LPCUTF8 otherSimpleName = other.m_module ? other.m_module->GetSimpleName() : ""; int nameCmpResult = strcmp(simpleName, otherSimpleName); if (nameCmpResult == 0) { // Names are equal but module addresses aren't, it's suspicious // falling back to module GUIDs GUID thisGuid, otherGuid; if (m_module == NULL) { memset(&thisGuid, 0, sizeof(GUID)); } else { m_module->GetPEAssembly()->GetMVID(&thisGuid); } if (other.m_module == NULL) { memset(&otherGuid, 0, sizeof(GUID)); } else { other.m_module->GetPEAssembly()->GetMVID(&otherGuid); } return memcmp(&thisGuid, &otherGuid, sizeof(GUID)) < 0; } else { return nameCmpResult < 0; } } } bool MethodInModule::operator ==(const MethodInModule& other) const { LIMITED_METHOD_DAC_CONTRACT; return m_methodDef == other.m_methodDef && m_module == other.m_module; } bool MethodInModule::operator !=(const MethodInModule& other) const { LIMITED_METHOD_DAC_CONTRACT; return m_methodDef != other.m_methodDef || m_module != other.m_module; } void InlineTrackingEntry::SortAndDeduplicate() { STANDARD_VM_CONTRACT; //Sort MethodInModule *begin = &m_inliners[0]; MethodInModule *end = begin + m_inliners.GetCount(); util::sort(begin, end); //Deduplicate MethodInModule *left = begin; MethodInModule *right = left + 1; while (right < end) { auto rvalue = *right; if (*left != rvalue) { left++; if (left != right) { *left = rvalue; } } right++; } //Shrink int newCount = (int)(left - begin + 1); m_inliners.SetCount(newCount); } InlineTrackingEntry::InlineTrackingEntry(const InlineTrackingEntry& other) :m_inlinee(other.m_inlinee) { STANDARD_VM_CONTRACT; m_inliners.Set(other.m_inliners); } InlineTrackingEntry & InlineTrackingEntry::operator = (const InlineTrackingEntry &other) { STANDARD_VM_CONTRACT; m_inlinee = other.m_inlinee; m_inliners.Set(other.m_inliners); return *this; } void InlineTrackingEntry::Add(PTR_MethodDesc inliner) { STANDARD_VM_CONTRACT; MethodInModule method(inliner->GetModule(), inliner->GetMemberDef()); // Going through last 10 inliners to check if a given inliner has recently been registered. 
// It allows to filter out most duplicates without having to scan through hundreds of inliners // for methods like Object.ctor or Monitor.Enter. // We are OK to keep occasional duplicates in m_inliners, we'll get rid of them // in SortAndDeduplicate() anyway. int count = static_cast<int>(m_inliners.GetCount()); int start = max(0, count - 10); for (int i = count - 1; i >= start; i--) { if (m_inliners[i] == method) return; } //look like we see this inliner for the first time, add it to the collection m_inliners.Append(method); } InlineTrackingMap::InlineTrackingMap() : m_mapCrst(CrstInlineTrackingMap) { STANDARD_VM_CONTRACT; } void InlineTrackingMap::AddInlining(MethodDesc *inliner, MethodDesc *inlinee) { STANDARD_VM_CONTRACT; _ASSERTE(inliner != NULL); _ASSERTE(inlinee != NULL); MethodInModule inlineeMnM(inlinee->GetModule(), inlinee->GetMemberDef()); if (RidFromToken(inlineeMnM.m_methodDef) == 0 || RidFromToken(inliner->GetMemberDef()) == 0) { // Sometimes we do see methods that don't have valid tokens (stubs etc) // we just ignore them. return; } CrstHolder lock(&m_mapCrst); InlineTrackingEntry *existingEntry = const_cast<InlineTrackingEntry *>(LookupPtr(inlineeMnM)); if (existingEntry) { // We saw this inlinee before, just add one more inliner existingEntry->Add(inliner); } else { // We haven't seen this inlinee before, create a new record in the hashtable // and add a first inliner to it. InlineTrackingEntry newEntry; newEntry.m_inlinee = inlineeMnM; newEntry.Add(inliner); Add(newEntry); } } #endif //!DACCESS_COMPILE #ifdef FEATURE_READYTORUN struct InliningHeader { int SizeOfInlineeIndex; }; #ifndef DACCESS_COMPILE BOOL PersistentInlineTrackingMapR2R::TryLoad(Module* pModule, const BYTE* pBuffer, DWORD cbBuffer, AllocMemTracker *pamTracker, PersistentInlineTrackingMapR2R** ppLoadedMap) { InliningHeader* pHeader = (InliningHeader*)pBuffer; if (pHeader->SizeOfInlineeIndex > (int)(cbBuffer - sizeof(InliningHeader))) { //invalid serialized data, the index can't be larger the entire block _ASSERTE(!"R2R image is invalid or there is a bug in the R2R parser"); return FALSE; } //NOTE: Error checking on the format is very limited at this point. //We trust the image format is valid and this initial check is a cheap //verification that may help catch simple bugs. It does not secure against //a deliberately maliciously formed binary. LoaderHeap *pHeap = pModule->GetLoaderAllocator()->GetHighFrequencyHeap(); void * pMemory = pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(PersistentInlineTrackingMapR2R))); PersistentInlineTrackingMapR2R* pMap = new (pMemory) PersistentInlineTrackingMapR2R(); pMap->m_module = pModule; pMap->m_inlineeIndex = (PTR_ZapInlineeRecord)(pHeader + 1); pMap->m_inlineeIndexSize = pHeader->SizeOfInlineeIndex / sizeof(ZapInlineeRecord); pMap->m_inlinersBuffer = ((PTR_BYTE)(pHeader+1)) + pHeader->SizeOfInlineeIndex; pMap->m_inlinersBufferSize = cbBuffer - sizeof(InliningHeader) - pMap->m_inlineeIndexSize; *ppLoadedMap = pMap; return TRUE; } #endif //!DACCESS_COMPILE COUNT_T PersistentInlineTrackingMapR2R::GetInliners(PTR_Module inlineeOwnerMod, mdMethodDef inlineeTkn, COUNT_T inlinersSize, MethodInModule inliners[], BOOL *incompleteData) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(inlineeOwnerMod); _ASSERTE(inliners != NULL || inlinersSize == 0); if (incompleteData) { *incompleteData = FALSE; } if (m_inlineeIndex == NULL || m_inlinersBuffer == NULL) { //No inlines saved in this image. 
return 0; } if(inlineeOwnerMod != m_module) { // no cross module inlining (yet?) return 0; } // Binary search to find all records matching (inlineeTkn) ZapInlineeRecord probeRecord; probeRecord.InitForR2R(RidFromToken(inlineeTkn)); ZapInlineeRecord *begin = m_inlineeIndex; ZapInlineeRecord *end = m_inlineeIndex + m_inlineeIndexSize; ZapInlineeRecord *foundRecord = util::lower_bound(begin, end, probeRecord); DWORD result = 0; DWORD outputIndex = 0; // Go through all matching records for (; foundRecord < end && *foundRecord == probeRecord; foundRecord++) { DWORD offset = foundRecord->m_offset; NibbleReader stream(m_inlinersBuffer + offset, m_inlinersBufferSize - offset); Module *inlinerModule = m_module; DWORD inlinersCount = stream.ReadEncodedU32(); _ASSERTE(inlinersCount > 0); RID inlinerRid = 0; // Reading inliner RIDs one by one, each RID is represented as an adjustment (diff) to the previous one. // Adding inliners module and coping to the output buffer for (DWORD i = 0; i < inlinersCount && outputIndex < inlinersSize; i++) { inlinerRid += stream.ReadEncodedU32(); mdMethodDef inlinerTkn = TokenFromRid(inlinerRid, mdtMethodDef); inliners[outputIndex++] = MethodInModule(inlinerModule, inlinerTkn); } result += inlinersCount; } return result; } #ifndef DACCESS_COMPILE BOOL PersistentInlineTrackingMapR2R2::TryLoad(Module* pModule, const BYTE* pBuffer, DWORD cbBuffer, AllocMemTracker* pamTracker, PersistentInlineTrackingMapR2R2** ppLoadedMap) { LoaderHeap* pHeap = pModule->GetLoaderAllocator()->GetHighFrequencyHeap(); void* pMemory = pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(PersistentInlineTrackingMapR2R2))); PersistentInlineTrackingMapR2R2* pMap = new (pMemory) PersistentInlineTrackingMapR2R2(); pMap->m_module = pModule; pMap->m_reader = NativeReader(pBuffer, cbBuffer); NativeParser parser = NativeParser(&pMap->m_reader, 0); pMap->m_hashtable = NativeHashtable(parser); *ppLoadedMap = pMap; return TRUE; } COUNT_T PersistentInlineTrackingMapR2R2::GetInliners(PTR_Module inlineeOwnerMod, mdMethodDef inlineeTkn, COUNT_T inlinersSize, MethodInModule inliners[], BOOL* incompleteData) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(inlineeOwnerMod); _ASSERTE(inliners != NULL || inlinersSize == 0); if (incompleteData) { *incompleteData = FALSE; } DWORD result = 0; int hashCode = GetVersionResilientModuleHashCode(inlineeOwnerMod); hashCode ^= inlineeTkn; NativeHashtable::Enumerator lookup = m_hashtable.Lookup(hashCode); NativeParser entryParser; while (lookup.GetNext(entryParser)) { DWORD streamSize = entryParser.GetUnsigned(); _ASSERTE(streamSize > 1); // First make sure this is the right inlinee and not just a hash collision DWORD inlineeRidAndFlag = entryParser.GetUnsigned(); streamSize--; mdMethodDef inlineeToken = TokenFromRid(inlineeRidAndFlag >> 1, mdtMethodDef); if (inlineeToken != inlineeTkn) { continue; } Module* inlineeModule; if ((inlineeRidAndFlag & 1) != 0) { inlineeModule = GetModuleByIndex(entryParser.GetUnsigned()); streamSize--; _ASSERTE(streamSize > 0); } else { inlineeModule = m_module; } if (inlineeModule != inlineeOwnerMod) { continue; } // We have the right inlinee, let's look at the inliners DWORD currentInlinerRid = 0; do { DWORD inlinerRidDeltaAndFlag = entryParser.GetUnsigned(); streamSize--; currentInlinerRid += inlinerRidDeltaAndFlag >> 1; Module* inlinerModule; if ((inlinerRidDeltaAndFlag & 1) != 0) { _ASSERTE(streamSize > 0); inlinerModule = GetModuleByIndex(entryParser.GetUnsigned()); streamSize--; if (inlinerModule == nullptr && 
incompleteData) { // We can't find module for this inlineeModuleZapIndex, it means it hasn't been loaded yet // (maybe it never will be), we just report it to the profiler. // Profiler might want to try later when more modules are loaded. *incompleteData = TRUE; continue; } } else { inlinerModule = m_module; } if (result < inlinersSize) { inliners[result].m_methodDef = TokenFromRid(currentInlinerRid, mdtMethodDef); inliners[result].m_module = inlinerModule; } result++; } while (streamSize > 0); } return result; } Module* PersistentInlineTrackingMapR2R2::GetModuleByIndex(DWORD index) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; // This "black magic spell" has in fact nothing to do with GenericInstantiationCompare per se, but just sets a thread flag // that later activates more thorough search inside Module::GetAssemblyIfLoaded, which is indirectly called from GetModuleFromIndexIfLoaded. // This is useful when ngen image was compiler against a different assembly version than the one loaded now. ClrFlsThreadTypeSwitch genericInstantionCompareHolder(ThreadType_GenericInstantiationCompare); return m_module->GetModuleFromIndexIfLoaded(index); } #endif //!DACCESS_COMPILE #endif //FEATURE_READYTORUN #if !defined(DACCESS_COMPILE) JITInlineTrackingMap::JITInlineTrackingMap(LoaderAllocator *pAssociatedLoaderAllocator) : m_mapCrst(CrstJitInlineTrackingMap), m_map() { LIMITED_METHOD_CONTRACT; m_map.Init(pAssociatedLoaderAllocator); } BOOL JITInlineTrackingMap::InliningExistsDontTakeLock(MethodDesc *inliner, MethodDesc *inlinee) { LIMITED_METHOD_CONTRACT; _ASSERTE(m_mapCrst.OwnedByCurrentThread()); _ASSERTE(inliner != NULL); _ASSERTE(inlinee != NULL); BOOL found = FALSE; auto lambda = [&](OBJECTREF obj, MethodDesc *lambdaInlinee, MethodDesc *lambdaInliner) { _ASSERTE(inlinee == lambdaInlinee); if (lambdaInliner == inliner) { found = TRUE; return false; } return true; }; m_map.VisitValuesOfKey(inlinee, lambda); return found; } void JITInlineTrackingMap::AddInlining(MethodDesc *inliner, MethodDesc *inlinee) { LIMITED_METHOD_CONTRACT; inlinee = inlinee->LoadTypicalMethodDefinition(); CrstHolder holder(&m_mapCrst); AddInliningDontTakeLock(inliner, inlinee); } void JITInlineTrackingMap::AddInliningDontTakeLock(MethodDesc *inliner, MethodDesc *inlinee) { LIMITED_METHOD_CONTRACT; _ASSERTE(m_mapCrst.OwnedByCurrentThread()); _ASSERTE(inliner != NULL); _ASSERTE(inlinee != NULL); GCX_COOP(); if (!InliningExistsDontTakeLock(inliner, inlinee)) { LoaderAllocator *loaderAllocatorOfInliner = inliner->GetLoaderAllocator(); m_map.Add(inlinee, inliner, loaderAllocatorOfInliner); } } #endif // !defined(DACCESS_COMPILE)
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ============================================================================================= // Code for tracking method inlinings in NGen and R2R images. // The only information stored is "who" got inlined "where", no offsets or inlining depth tracking. // (No good for debugger yet.) // This information is later exposed to profilers and can be useful for ReJIT. // Runtime inlining is not being tracked because profilers can deduce it via callbacks anyway. // ============================================================================================= #include "common.h" #include "inlinetracking.h" #include "ceeload.h" #include "versionresilienthashcode.h" using namespace NativeFormat; #ifndef DACCESS_COMPILE bool MethodInModule::operator <(const MethodInModule& other) const { STANDARD_VM_CONTRACT; if (m_module == other.m_module) { return m_methodDef < other.m_methodDef; } else { // Since NGen images are supposed to be determenistic, // we need stable sort order that isn't changing between different runs // That's why we use names and GUIDs instead of just doing m_module < other.m_module // First we try to compare simple names (should be fast enough) LPCUTF8 simpleName = m_module ? m_module->GetSimpleName() : ""; LPCUTF8 otherSimpleName = other.m_module ? other.m_module->GetSimpleName() : ""; int nameCmpResult = strcmp(simpleName, otherSimpleName); if (nameCmpResult == 0) { // Names are equal but module addresses aren't, it's suspicious // falling back to module GUIDs GUID thisGuid, otherGuid; if (m_module == NULL) { memset(&thisGuid, 0, sizeof(GUID)); } else { m_module->GetPEAssembly()->GetMVID(&thisGuid); } if (other.m_module == NULL) { memset(&otherGuid, 0, sizeof(GUID)); } else { other.m_module->GetPEAssembly()->GetMVID(&otherGuid); } return memcmp(&thisGuid, &otherGuid, sizeof(GUID)) < 0; } else { return nameCmpResult < 0; } } } bool MethodInModule::operator ==(const MethodInModule& other) const { LIMITED_METHOD_DAC_CONTRACT; return m_methodDef == other.m_methodDef && m_module == other.m_module; } bool MethodInModule::operator !=(const MethodInModule& other) const { LIMITED_METHOD_DAC_CONTRACT; return m_methodDef != other.m_methodDef || m_module != other.m_module; } void InlineTrackingEntry::SortAndDeduplicate() { STANDARD_VM_CONTRACT; //Sort MethodInModule *begin = &m_inliners[0]; MethodInModule *end = begin + m_inliners.GetCount(); util::sort(begin, end); //Deduplicate MethodInModule *left = begin; MethodInModule *right = left + 1; while (right < end) { auto rvalue = *right; if (*left != rvalue) { left++; if (left != right) { *left = rvalue; } } right++; } //Shrink int newCount = (int)(left - begin + 1); m_inliners.SetCount(newCount); } InlineTrackingEntry::InlineTrackingEntry(const InlineTrackingEntry& other) :m_inlinee(other.m_inlinee) { STANDARD_VM_CONTRACT; m_inliners.Set(other.m_inliners); } InlineTrackingEntry & InlineTrackingEntry::operator = (const InlineTrackingEntry &other) { STANDARD_VM_CONTRACT; m_inlinee = other.m_inlinee; m_inliners.Set(other.m_inliners); return *this; } void InlineTrackingEntry::Add(PTR_MethodDesc inliner) { STANDARD_VM_CONTRACT; MethodInModule method(inliner->GetModule(), inliner->GetMemberDef()); // Going through last 10 inliners to check if a given inliner has recently been registered. 
// It allows to filter out most duplicates without having to scan through hundreds of inliners // for methods like Object.ctor or Monitor.Enter. // We are OK to keep occasional duplicates in m_inliners, we'll get rid of them // in SortAndDeduplicate() anyway. int count = static_cast<int>(m_inliners.GetCount()); int start = max(0, count - 10); for (int i = count - 1; i >= start; i--) { if (m_inliners[i] == method) return; } //look like we see this inliner for the first time, add it to the collection m_inliners.Append(method); } InlineTrackingMap::InlineTrackingMap() : m_mapCrst(CrstInlineTrackingMap) { STANDARD_VM_CONTRACT; } void InlineTrackingMap::AddInlining(MethodDesc *inliner, MethodDesc *inlinee) { STANDARD_VM_CONTRACT; _ASSERTE(inliner != NULL); _ASSERTE(inlinee != NULL); MethodInModule inlineeMnM(inlinee->GetModule(), inlinee->GetMemberDef()); if (RidFromToken(inlineeMnM.m_methodDef) == 0 || RidFromToken(inliner->GetMemberDef()) == 0) { // Sometimes we do see methods that don't have valid tokens (stubs etc) // we just ignore them. return; } CrstHolder lock(&m_mapCrst); InlineTrackingEntry *existingEntry = const_cast<InlineTrackingEntry *>(LookupPtr(inlineeMnM)); if (existingEntry) { // We saw this inlinee before, just add one more inliner existingEntry->Add(inliner); } else { // We haven't seen this inlinee before, create a new record in the hashtable // and add a first inliner to it. InlineTrackingEntry newEntry; newEntry.m_inlinee = inlineeMnM; newEntry.Add(inliner); Add(newEntry); } } #endif //!DACCESS_COMPILE #ifdef FEATURE_READYTORUN struct InliningHeader { int SizeOfInlineeIndex; }; #ifndef DACCESS_COMPILE BOOL PersistentInlineTrackingMapR2R::TryLoad(Module* pModule, const BYTE* pBuffer, DWORD cbBuffer, AllocMemTracker *pamTracker, PersistentInlineTrackingMapR2R** ppLoadedMap) { InliningHeader* pHeader = (InliningHeader*)pBuffer; if (pHeader->SizeOfInlineeIndex > (int)(cbBuffer - sizeof(InliningHeader))) { //invalid serialized data, the index can't be larger the entire block _ASSERTE(!"R2R image is invalid or there is a bug in the R2R parser"); return FALSE; } //NOTE: Error checking on the format is very limited at this point. //We trust the image format is valid and this initial check is a cheap //verification that may help catch simple bugs. It does not secure against //a deliberately maliciously formed binary. LoaderHeap *pHeap = pModule->GetLoaderAllocator()->GetHighFrequencyHeap(); void * pMemory = pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(PersistentInlineTrackingMapR2R))); PersistentInlineTrackingMapR2R* pMap = new (pMemory) PersistentInlineTrackingMapR2R(); pMap->m_module = pModule; pMap->m_inlineeIndex = (PTR_ZapInlineeRecord)(pHeader + 1); pMap->m_inlineeIndexSize = pHeader->SizeOfInlineeIndex / sizeof(ZapInlineeRecord); pMap->m_inlinersBuffer = ((PTR_BYTE)(pHeader+1)) + pHeader->SizeOfInlineeIndex; pMap->m_inlinersBufferSize = cbBuffer - sizeof(InliningHeader) - pMap->m_inlineeIndexSize; *ppLoadedMap = pMap; return TRUE; } #endif //!DACCESS_COMPILE COUNT_T PersistentInlineTrackingMapR2R::GetInliners(PTR_Module inlineeOwnerMod, mdMethodDef inlineeTkn, COUNT_T inlinersSize, MethodInModule inliners[], BOOL *incompleteData) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(inlineeOwnerMod); _ASSERTE(inliners != NULL || inlinersSize == 0); if (incompleteData) { *incompleteData = FALSE; } if (m_inlineeIndex == NULL || m_inlinersBuffer == NULL) { //No inlines saved in this image. 
return 0; } if(inlineeOwnerMod != m_module) { // no cross module inlining (yet?) return 0; } // Binary search to find all records matching (inlineeTkn) ZapInlineeRecord probeRecord; probeRecord.InitForR2R(RidFromToken(inlineeTkn)); ZapInlineeRecord *begin = m_inlineeIndex; ZapInlineeRecord *end = m_inlineeIndex + m_inlineeIndexSize; ZapInlineeRecord *foundRecord = util::lower_bound(begin, end, probeRecord); DWORD result = 0; DWORD outputIndex = 0; // Go through all matching records for (; foundRecord < end && *foundRecord == probeRecord; foundRecord++) { DWORD offset = foundRecord->m_offset; NibbleReader stream(m_inlinersBuffer + offset, m_inlinersBufferSize - offset); Module *inlinerModule = m_module; DWORD inlinersCount = stream.ReadEncodedU32(); _ASSERTE(inlinersCount > 0); RID inlinerRid = 0; // Reading inliner RIDs one by one, each RID is represented as an adjustment (diff) to the previous one. // Adding inliners module and coping to the output buffer for (DWORD i = 0; i < inlinersCount && outputIndex < inlinersSize; i++) { inlinerRid += stream.ReadEncodedU32(); mdMethodDef inlinerTkn = TokenFromRid(inlinerRid, mdtMethodDef); inliners[outputIndex++] = MethodInModule(inlinerModule, inlinerTkn); } result += inlinersCount; } return result; } #ifndef DACCESS_COMPILE BOOL PersistentInlineTrackingMapR2R2::TryLoad(Module* pModule, const BYTE* pBuffer, DWORD cbBuffer, AllocMemTracker* pamTracker, PersistentInlineTrackingMapR2R2** ppLoadedMap) { LoaderHeap* pHeap = pModule->GetLoaderAllocator()->GetHighFrequencyHeap(); void* pMemory = pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(PersistentInlineTrackingMapR2R2))); PersistentInlineTrackingMapR2R2* pMap = new (pMemory) PersistentInlineTrackingMapR2R2(); pMap->m_module = pModule; pMap->m_reader = NativeReader(pBuffer, cbBuffer); NativeParser parser = NativeParser(&pMap->m_reader, 0); pMap->m_hashtable = NativeHashtable(parser); *ppLoadedMap = pMap; return TRUE; } COUNT_T PersistentInlineTrackingMapR2R2::GetInliners(PTR_Module inlineeOwnerMod, mdMethodDef inlineeTkn, COUNT_T inlinersSize, MethodInModule inliners[], BOOL* incompleteData) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(inlineeOwnerMod); _ASSERTE(inliners != NULL || inlinersSize == 0); if (incompleteData) { *incompleteData = FALSE; } DWORD result = 0; int hashCode = GetVersionResilientModuleHashCode(inlineeOwnerMod); hashCode ^= inlineeTkn; NativeHashtable::Enumerator lookup = m_hashtable.Lookup(hashCode); NativeParser entryParser; while (lookup.GetNext(entryParser)) { DWORD streamSize = entryParser.GetUnsigned(); _ASSERTE(streamSize > 1); // First make sure this is the right inlinee and not just a hash collision DWORD inlineeRidAndFlag = entryParser.GetUnsigned(); streamSize--; mdMethodDef inlineeToken = TokenFromRid(inlineeRidAndFlag >> 1, mdtMethodDef); if (inlineeToken != inlineeTkn) { continue; } Module* inlineeModule; if ((inlineeRidAndFlag & 1) != 0) { inlineeModule = GetModuleByIndex(entryParser.GetUnsigned()); streamSize--; _ASSERTE(streamSize > 0); } else { inlineeModule = m_module; } if (inlineeModule != inlineeOwnerMod) { continue; } // We have the right inlinee, let's look at the inliners DWORD currentInlinerRid = 0; do { DWORD inlinerRidDeltaAndFlag = entryParser.GetUnsigned(); streamSize--; currentInlinerRid += inlinerRidDeltaAndFlag >> 1; Module* inlinerModule; if ((inlinerRidDeltaAndFlag & 1) != 0) { _ASSERTE(streamSize > 0); inlinerModule = GetModuleByIndex(entryParser.GetUnsigned()); streamSize--; if (inlinerModule == nullptr && 
incompleteData) { // We can't find module for this inlineeModuleZapIndex, it means it hasn't been loaded yet // (maybe it never will be), we just report it to the profiler. // Profiler might want to try later when more modules are loaded. *incompleteData = TRUE; continue; } } else { inlinerModule = m_module; } if (result < inlinersSize) { inliners[result].m_methodDef = TokenFromRid(currentInlinerRid, mdtMethodDef); inliners[result].m_module = inlinerModule; } result++; } while (streamSize > 0); } return result; } Module* PersistentInlineTrackingMapR2R2::GetModuleByIndex(DWORD index) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; // This "black magic spell" has in fact nothing to do with GenericInstantiationCompare per se, but just sets a thread flag // that later activates more thorough search inside Module::GetAssemblyIfLoaded, which is indirectly called from GetModuleFromIndexIfLoaded. // This is useful when ngen image was compiler against a different assembly version than the one loaded now. ClrFlsThreadTypeSwitch genericInstantionCompareHolder(ThreadType_GenericInstantiationCompare); return m_module->GetModuleFromIndexIfLoaded(index); } #endif //!DACCESS_COMPILE #endif //FEATURE_READYTORUN #if !defined(DACCESS_COMPILE) JITInlineTrackingMap::JITInlineTrackingMap(LoaderAllocator *pAssociatedLoaderAllocator) : m_mapCrst(CrstJitInlineTrackingMap), m_map() { LIMITED_METHOD_CONTRACT; m_map.Init(pAssociatedLoaderAllocator); } BOOL JITInlineTrackingMap::InliningExistsDontTakeLock(MethodDesc *inliner, MethodDesc *inlinee) { LIMITED_METHOD_CONTRACT; _ASSERTE(m_mapCrst.OwnedByCurrentThread()); _ASSERTE(inliner != NULL); _ASSERTE(inlinee != NULL); BOOL found = FALSE; auto lambda = [&](OBJECTREF obj, MethodDesc *lambdaInlinee, MethodDesc *lambdaInliner) { _ASSERTE(inlinee == lambdaInlinee); if (lambdaInliner == inliner) { found = TRUE; return false; } return true; }; m_map.VisitValuesOfKey(inlinee, lambda); return found; } void JITInlineTrackingMap::AddInlining(MethodDesc *inliner, MethodDesc *inlinee) { LIMITED_METHOD_CONTRACT; inlinee = inlinee->LoadTypicalMethodDefinition(); CrstHolder holder(&m_mapCrst); AddInliningDontTakeLock(inliner, inlinee); } void JITInlineTrackingMap::AddInliningDontTakeLock(MethodDesc *inliner, MethodDesc *inlinee) { LIMITED_METHOD_CONTRACT; _ASSERTE(m_mapCrst.OwnedByCurrentThread()); _ASSERTE(inliner != NULL); _ASSERTE(inlinee != NULL); GCX_COOP(); if (!InliningExistsDontTakeLock(inliner, inlinee)) { LoaderAllocator *loaderAllocatorOfInliner = inliner->GetLoaderAllocator(); m_map.Add(inlinee, inliner, loaderAllocatorOfInliner); } } #endif // !defined(DACCESS_COMPILE)
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/jit/inlinepolicy.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Inlining Policies // // This file contains class definitions for various inlining // policies used by the jit. // // -- CLASSES -- // // LegalPolicy - partial class providing common legality checks // DefaultPolicy - default inliner policy // ExtendedDefaltPolicy - a more aggressive and profile-driven variation of DefaultPolicy // DiscretionaryPolicy - default variant with uniform size policy // ModelPolicy - policy based on statistical modelling // ProfilePolicy - policy based on statistical modelling and profile feedback // // These experimental policies are available only in // DEBUG or release+INLINE_DATA builds of the jit. // // RandomPolicy - randomized inlining // FullPolicy - inlines everything up to size and depth limits // SizePolicy - tries not to increase method sizes // // The default policy in use is the DefaultPolicy. #ifndef _INLINE_POLICY_H_ #define _INLINE_POLICY_H_ #include "jit.h" #include "inline.h" // LegalPolicy is a partial policy that encapsulates the common // legality and ability checks the inliner must make. // // Generally speaking, the legal policy expects the inlining attempt // to fail fast when a fatal or equivalent observation is made. So // once an observation causes failure, no more observations are // expected. However for the prejit scan case (where the jit is not // actually inlining, but is assessing a method's general // inlinability) the legal policy allows multiple failing // observations provided they have the same impact. Only the first // observation that puts the policy into a failing state is // remembered. Transitions from failing states to candidate or success // states are not allowed. class LegalPolicy : public InlinePolicy { public: // Constructor LegalPolicy(bool isPrejitRoot) : InlinePolicy(isPrejitRoot) { // empty } // Handle an observation that must cause inlining to fail. void NoteFatal(InlineObservation obs) override; #if defined(DEBUG) || defined(INLINE_DATA) // Record observation for prior failure void NotePriorFailure(InlineObservation obs) override; #endif // defined(DEBUG) || defined(INLINE_DATA) protected: // Helper methods void NoteInternal(InlineObservation obs); void SetCandidate(InlineObservation obs); void SetFailure(InlineObservation obs); void SetNever(InlineObservation obs); }; // Forward declaration for the state machine class used by the // DefaultPolicy class CodeSeqSM; // DefaultPolicy implements the default inlining policy for the jit. 
class DefaultPolicy : public LegalPolicy { public: // Construct a DefaultPolicy DefaultPolicy(Compiler* compiler, bool isPrejitRoot) : LegalPolicy(isPrejitRoot) , m_RootCompiler(compiler) , m_StateMachine(nullptr) , m_Multiplier(0.0) , m_CodeSize(0) , m_CallsiteFrequency(InlineCallsiteFrequency::UNUSED) , m_CallsiteDepth(0) , m_InstructionCount(0) , m_LoadStoreCount(0) , m_ArgFeedsTest(0) , m_ArgFeedsConstantTest(0) , m_ArgFeedsRangeCheck(0) , m_ConstantArgFeedsConstantTest(0) , m_CalleeNativeSizeEstimate(0) , m_CallsiteNativeSizeEstimate(0) , m_IsForceInline(false) , m_IsForceInlineKnown(false) , m_IsInstanceCtor(false) , m_IsFromPromotableValueClass(false) , m_HasSimd(false) , m_LooksLikeWrapperMethod(false) , m_MethodIsMostlyLoadStore(false) , m_CallsiteIsInTryRegion(false) , m_CallsiteIsInLoop(false) , m_IsNoReturn(false) , m_IsNoReturnKnown(false) , m_ConstArgFeedsIsKnownConst(false) , m_ArgFeedsIsKnownConst(false) { // empty } // Policy observations void NoteSuccess() override; void NoteBool(InlineObservation obs, bool value) override; void NoteInt(InlineObservation obs, int value) override; void NoteDouble(InlineObservation obs, double value) override; // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; bool BudgetCheck() const override; // Policy policies bool PropagateNeverToRuntime() const override; // Policy estimates int CodeSizeEstimate() override; #if defined(DEBUG) || defined(INLINE_DATA) void OnDumpXml(FILE* file, unsigned indent = 0) const override; const char* GetName() const override { return "DefaultPolicy"; } #endif // (DEBUG) || defined(INLINE_DATA) protected: // Constants enum { MAX_BASIC_BLOCKS = 5, SIZE_SCALE = 10 }; // Helper methods virtual double DetermineMultiplier(); int DetermineNativeSizeEstimate(); int DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* methodInfo); // Data members Compiler* m_RootCompiler; // root compiler instance CodeSeqSM* m_StateMachine; double m_Multiplier; unsigned m_CodeSize; InlineCallsiteFrequency m_CallsiteFrequency; unsigned m_CallsiteDepth; unsigned m_InstructionCount; unsigned m_LoadStoreCount; unsigned m_ArgFeedsTest; unsigned m_ArgFeedsConstantTest; unsigned m_ArgFeedsRangeCheck; unsigned m_ConstantArgFeedsConstantTest; int m_CalleeNativeSizeEstimate; int m_CallsiteNativeSizeEstimate; bool m_IsForceInline : 1; bool m_IsForceInlineKnown : 1; bool m_IsInstanceCtor : 1; bool m_IsFromPromotableValueClass : 1; bool m_HasSimd : 1; bool m_LooksLikeWrapperMethod : 1; bool m_MethodIsMostlyLoadStore : 1; bool m_CallsiteIsInTryRegion : 1; bool m_CallsiteIsInLoop : 1; bool m_IsNoReturn : 1; bool m_IsNoReturnKnown : 1; bool m_ConstArgFeedsIsKnownConst : 1; bool m_ArgFeedsIsKnownConst : 1; }; // ExtendedDefaultPolicy is a slightly more aggressive variant of // DefaultPolicy with an extended list of observations including profile data. 
class ExtendedDefaultPolicy : public DefaultPolicy { public: ExtendedDefaultPolicy(Compiler* compiler, bool isPrejitRoot) : DefaultPolicy(compiler, isPrejitRoot) , m_ProfileFrequency(0.0) , m_BinaryExprWithCns(0) , m_ArgCasted(0) , m_ArgIsStructByValue(0) , m_FldAccessOverArgStruct(0) , m_FoldableBox(0) , m_Intrinsic(0) , m_BackwardJump(0) , m_ThrowBlock(0) , m_ArgIsExactCls(0) , m_ArgIsExactClsSigIsNot(0) , m_ArgIsConst(0) , m_ArgIsBoxedAtCallsite(0) , m_FoldableIntrinsic(0) , m_FoldableExpr(0) , m_FoldableExprUn(0) , m_FoldableBranch(0) , m_FoldableSwitch(0) , m_Switch(0) , m_DivByCns(0) , m_ReturnsStructByValue(false) , m_IsFromValueClass(false) , m_NonGenericCallsGeneric(false) , m_IsCallsiteInNoReturnRegion(false) , m_HasProfile(false) { // Empty } void NoteBool(InlineObservation obs, bool value) override; void NoteInt(InlineObservation obs, int value) override; void NoteDouble(InlineObservation obs, double value) override; double DetermineMultiplier() override; bool RequiresPreciseScan() override { return true; } #if defined(DEBUG) || defined(INLINE_DATA) void OnDumpXml(FILE* file, unsigned indent = 0) const override; const char* GetName() const override { return "ExtendedDefaultPolicy"; } #endif // defined(DEBUG) || defined(INLINE_DATA) protected: double m_ProfileFrequency; unsigned m_BinaryExprWithCns; unsigned m_ArgCasted; unsigned m_ArgIsStructByValue; unsigned m_FldAccessOverArgStruct; unsigned m_FoldableBox; unsigned m_Intrinsic; unsigned m_BackwardJump; unsigned m_ThrowBlock; unsigned m_ArgIsExactCls; unsigned m_ArgIsExactClsSigIsNot; unsigned m_ArgIsConst; unsigned m_ArgIsBoxedAtCallsite; unsigned m_FoldableIntrinsic; unsigned m_FoldableExpr; unsigned m_FoldableExprUn; unsigned m_FoldableBranch; unsigned m_FoldableSwitch; unsigned m_Switch; unsigned m_DivByCns; bool m_ReturnsStructByValue : 1; bool m_IsFromValueClass : 1; bool m_NonGenericCallsGeneric : 1; bool m_IsCallsiteInNoReturnRegion : 1; bool m_HasProfile : 1; }; // DiscretionaryPolicy is a variant of the default policy. It // differs in that there is no ALWAYS_INLINE class, there is no IL // size limit, and in prejit mode, discretionary failures do not // propagate the "NEVER" inline bit to the runtime. // // It is useful for gathering data about inline costs. 
class DiscretionaryPolicy : public DefaultPolicy { public: // Construct a DiscretionaryPolicy DiscretionaryPolicy(Compiler* compiler, bool isPrejitRoot); // Policy observations void NoteBool(InlineObservation obs, bool value) override; void NoteInt(InlineObservation obs, int value) override; void NoteDouble(InlineObservation obs, double value) override; // Policy policies bool PropagateNeverToRuntime() const override; // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; // Policy estimates int CodeSizeEstimate() override; #if defined(DEBUG) || defined(INLINE_DATA) // Externalize data void DumpData(FILE* file) const override; void DumpSchema(FILE* file) const override; // Miscellaneous const char* GetName() const override { return "DiscretionaryPolicy"; } #endif // defined(DEBUG) || defined(INLINE_DATA) protected: void ComputeOpcodeBin(OPCODE opcode); void EstimateCodeSize(); void EstimatePerformanceImpact(); void MethodInfoObservations(CORINFO_METHOD_INFO* methodInfo); enum { MAX_ARGS = 6 }; double m_ProfileFrequency; unsigned m_BlockCount; unsigned m_Maxstack; unsigned m_ArgCount; CorInfoType m_ArgType[MAX_ARGS]; size_t m_ArgSize[MAX_ARGS]; unsigned m_LocalCount; CorInfoType m_ReturnType; size_t m_ReturnSize; unsigned m_ArgAccessCount; unsigned m_LocalAccessCount; unsigned m_IntConstantCount; unsigned m_FloatConstantCount; unsigned m_IntLoadCount; unsigned m_FloatLoadCount; unsigned m_IntStoreCount; unsigned m_FloatStoreCount; unsigned m_SimpleMathCount; unsigned m_ComplexMathCount; unsigned m_OverflowMathCount; unsigned m_IntArrayLoadCount; unsigned m_FloatArrayLoadCount; unsigned m_RefArrayLoadCount; unsigned m_StructArrayLoadCount; unsigned m_IntArrayStoreCount; unsigned m_FloatArrayStoreCount; unsigned m_RefArrayStoreCount; unsigned m_StructArrayStoreCount; unsigned m_StructOperationCount; unsigned m_ObjectModelCount; unsigned m_FieldLoadCount; unsigned m_FieldStoreCount; unsigned m_StaticFieldLoadCount; unsigned m_StaticFieldStoreCount; unsigned m_LoadAddressCount; unsigned m_ThrowCount; unsigned m_ReturnCount; unsigned m_CallCount; unsigned m_CallSiteWeight; int m_ModelCodeSizeEstimate; int m_PerCallInstructionEstimate; bool m_HasProfile; bool m_IsClassCtor; bool m_IsSameThis; bool m_CallerHasNewArray; bool m_CallerHasNewObj; bool m_CalleeHasGCStruct; }; // ModelPolicy is an experimental policy that uses the results // of data modelling to make estimates. class ModelPolicy : public DiscretionaryPolicy { public: // Construct a ModelPolicy ModelPolicy(Compiler* compiler, bool isPrejitRoot); // Policy observations void NoteInt(InlineObservation obs, int value) override; // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; // Policy policies bool PropagateNeverToRuntime() const override { return true; } #if defined(DEBUG) || defined(INLINE_DATA) // Miscellaneous const char* GetName() const override { return "ModelPolicy"; } #endif // defined(DEBUG) || defined(INLINE_DATA) }; // ProfilePolicy is an experimental policy that uses the results // of data modelling and profile feedback to make estimates. 
class ProfilePolicy : public DiscretionaryPolicy { public: // Construct a ProfilePolicy ProfilePolicy(Compiler* compiler, bool isPrejitRoot); // Policy observations void NoteInt(InlineObservation obs, int value) override; // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; #if defined(DEBUG) || defined(INLINE_DATA) // Miscellaneous const char* GetName() const override { return "ProfilePolicy"; } #endif // defined(DEBUG) || defined(INLINE_DATA) }; #if defined(DEBUG) || defined(INLINE_DATA) // RandomPolicy implements a policy that inlines at random. // It is mostly useful for stress testing. class RandomPolicy : public DiscretionaryPolicy { public: // Construct a RandomPolicy RandomPolicy(Compiler* compiler, bool isPrejitRoot); // Policy observations void NoteInt(InlineObservation obs, int value) override; // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; const char* GetName() const override { return "RandomPolicy"; } private: // Data members CLRRandom* m_Random; }; #endif // defined(DEBUG) || defined(INLINE_DATA) #if defined(DEBUG) || defined(INLINE_DATA) // FullPolicy is an experimental policy that will always inline if // possible, subject to externally settable depth and size limits. // // It's useful for uncovering the full set of possible inlines for // methods. class FullPolicy : public DiscretionaryPolicy { public: // Construct a FullPolicy FullPolicy(Compiler* compiler, bool isPrejitRoot); // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; bool BudgetCheck() const override; // Miscellaneous const char* GetName() const override { return "FullPolicy"; } }; // SizePolicy is an experimental policy that will inline as much // as possible without increasing the (estimated) method size. // // It may be useful down the road as a policy to use for methods // that are rarely executed (eg class constructors). class SizePolicy : public DiscretionaryPolicy { public: // Construct a SizePolicy SizePolicy(Compiler* compiler, bool isPrejitRoot); // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; // Miscellaneous const char* GetName() const override { return "SizePolicy"; } }; // The ReplayPolicy performs only inlines specified by an external // inline replay log. class ReplayPolicy : public DiscretionaryPolicy { public: // Construct a ReplayPolicy ReplayPolicy(Compiler* compiler, bool isPrejitRoot); // Policy observations void NoteBool(InlineObservation obs, bool value) override; // Optional observations void NoteContext(InlineContext* context) override { m_InlineContext = context; } void NoteOffset(IL_OFFSET offset) override { m_Offset = offset; } // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; // Miscellaneous const char* GetName() const override { return "ReplayPolicy"; } static void FinalizeXml(); private: bool FindMethod(); bool FindContext(InlineContext* context); bool FindInline(CORINFO_METHOD_HANDLE callee); bool FindInline(unsigned token, unsigned hash, unsigned offset); static bool s_WroteReplayBanner; static FILE* s_ReplayFile; static CritSecObject s_XmlReaderLock; InlineContext* m_InlineContext; IL_OFFSET m_Offset; bool m_WasForceInline; }; #endif // defined(DEBUG) || defined(INLINE_DATA) #endif // _INLINE_POLICY_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Inlining Policies // // This file contains class definitions for various inlining // policies used by the jit. // // -- CLASSES -- // // LegalPolicy - partial class providing common legality checks // DefaultPolicy - default inliner policy // ExtendedDefaltPolicy - a more aggressive and profile-driven variation of DefaultPolicy // DiscretionaryPolicy - default variant with uniform size policy // ModelPolicy - policy based on statistical modelling // ProfilePolicy - policy based on statistical modelling and profile feedback // // These experimental policies are available only in // DEBUG or release+INLINE_DATA builds of the jit. // // RandomPolicy - randomized inlining // FullPolicy - inlines everything up to size and depth limits // SizePolicy - tries not to increase method sizes // // The default policy in use is the DefaultPolicy. #ifndef _INLINE_POLICY_H_ #define _INLINE_POLICY_H_ #include "jit.h" #include "inline.h" // LegalPolicy is a partial policy that encapsulates the common // legality and ability checks the inliner must make. // // Generally speaking, the legal policy expects the inlining attempt // to fail fast when a fatal or equivalent observation is made. So // once an observation causes failure, no more observations are // expected. However for the prejit scan case (where the jit is not // actually inlining, but is assessing a method's general // inlinability) the legal policy allows multiple failing // observations provided they have the same impact. Only the first // observation that puts the policy into a failing state is // remembered. Transitions from failing states to candidate or success // states are not allowed. class LegalPolicy : public InlinePolicy { public: // Constructor LegalPolicy(bool isPrejitRoot) : InlinePolicy(isPrejitRoot) { // empty } // Handle an observation that must cause inlining to fail. void NoteFatal(InlineObservation obs) override; #if defined(DEBUG) || defined(INLINE_DATA) // Record observation for prior failure void NotePriorFailure(InlineObservation obs) override; #endif // defined(DEBUG) || defined(INLINE_DATA) protected: // Helper methods void NoteInternal(InlineObservation obs); void SetCandidate(InlineObservation obs); void SetFailure(InlineObservation obs); void SetNever(InlineObservation obs); }; // Forward declaration for the state machine class used by the // DefaultPolicy class CodeSeqSM; // DefaultPolicy implements the default inlining policy for the jit. 
class DefaultPolicy : public LegalPolicy { public: // Construct a DefaultPolicy DefaultPolicy(Compiler* compiler, bool isPrejitRoot) : LegalPolicy(isPrejitRoot) , m_RootCompiler(compiler) , m_StateMachine(nullptr) , m_Multiplier(0.0) , m_CodeSize(0) , m_CallsiteFrequency(InlineCallsiteFrequency::UNUSED) , m_CallsiteDepth(0) , m_InstructionCount(0) , m_LoadStoreCount(0) , m_ArgFeedsTest(0) , m_ArgFeedsConstantTest(0) , m_ArgFeedsRangeCheck(0) , m_ConstantArgFeedsConstantTest(0) , m_CalleeNativeSizeEstimate(0) , m_CallsiteNativeSizeEstimate(0) , m_IsForceInline(false) , m_IsForceInlineKnown(false) , m_IsInstanceCtor(false) , m_IsFromPromotableValueClass(false) , m_HasSimd(false) , m_LooksLikeWrapperMethod(false) , m_MethodIsMostlyLoadStore(false) , m_CallsiteIsInTryRegion(false) , m_CallsiteIsInLoop(false) , m_IsNoReturn(false) , m_IsNoReturnKnown(false) , m_ConstArgFeedsIsKnownConst(false) , m_ArgFeedsIsKnownConst(false) { // empty } // Policy observations void NoteSuccess() override; void NoteBool(InlineObservation obs, bool value) override; void NoteInt(InlineObservation obs, int value) override; void NoteDouble(InlineObservation obs, double value) override; // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; bool BudgetCheck() const override; // Policy policies bool PropagateNeverToRuntime() const override; // Policy estimates int CodeSizeEstimate() override; #if defined(DEBUG) || defined(INLINE_DATA) void OnDumpXml(FILE* file, unsigned indent = 0) const override; const char* GetName() const override { return "DefaultPolicy"; } #endif // (DEBUG) || defined(INLINE_DATA) protected: // Constants enum { MAX_BASIC_BLOCKS = 5, SIZE_SCALE = 10 }; // Helper methods virtual double DetermineMultiplier(); int DetermineNativeSizeEstimate(); int DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* methodInfo); // Data members Compiler* m_RootCompiler; // root compiler instance CodeSeqSM* m_StateMachine; double m_Multiplier; unsigned m_CodeSize; InlineCallsiteFrequency m_CallsiteFrequency; unsigned m_CallsiteDepth; unsigned m_InstructionCount; unsigned m_LoadStoreCount; unsigned m_ArgFeedsTest; unsigned m_ArgFeedsConstantTest; unsigned m_ArgFeedsRangeCheck; unsigned m_ConstantArgFeedsConstantTest; int m_CalleeNativeSizeEstimate; int m_CallsiteNativeSizeEstimate; bool m_IsForceInline : 1; bool m_IsForceInlineKnown : 1; bool m_IsInstanceCtor : 1; bool m_IsFromPromotableValueClass : 1; bool m_HasSimd : 1; bool m_LooksLikeWrapperMethod : 1; bool m_MethodIsMostlyLoadStore : 1; bool m_CallsiteIsInTryRegion : 1; bool m_CallsiteIsInLoop : 1; bool m_IsNoReturn : 1; bool m_IsNoReturnKnown : 1; bool m_ConstArgFeedsIsKnownConst : 1; bool m_ArgFeedsIsKnownConst : 1; }; // ExtendedDefaultPolicy is a slightly more aggressive variant of // DefaultPolicy with an extended list of observations including profile data. 
class ExtendedDefaultPolicy : public DefaultPolicy { public: ExtendedDefaultPolicy(Compiler* compiler, bool isPrejitRoot) : DefaultPolicy(compiler, isPrejitRoot) , m_ProfileFrequency(0.0) , m_BinaryExprWithCns(0) , m_ArgCasted(0) , m_ArgIsStructByValue(0) , m_FldAccessOverArgStruct(0) , m_FoldableBox(0) , m_Intrinsic(0) , m_BackwardJump(0) , m_ThrowBlock(0) , m_ArgIsExactCls(0) , m_ArgIsExactClsSigIsNot(0) , m_ArgIsConst(0) , m_ArgIsBoxedAtCallsite(0) , m_FoldableIntrinsic(0) , m_FoldableExpr(0) , m_FoldableExprUn(0) , m_FoldableBranch(0) , m_FoldableSwitch(0) , m_Switch(0) , m_DivByCns(0) , m_ReturnsStructByValue(false) , m_IsFromValueClass(false) , m_NonGenericCallsGeneric(false) , m_IsCallsiteInNoReturnRegion(false) , m_HasProfile(false) { // Empty } void NoteBool(InlineObservation obs, bool value) override; void NoteInt(InlineObservation obs, int value) override; void NoteDouble(InlineObservation obs, double value) override; double DetermineMultiplier() override; bool RequiresPreciseScan() override { return true; } #if defined(DEBUG) || defined(INLINE_DATA) void OnDumpXml(FILE* file, unsigned indent = 0) const override; const char* GetName() const override { return "ExtendedDefaultPolicy"; } #endif // defined(DEBUG) || defined(INLINE_DATA) protected: double m_ProfileFrequency; unsigned m_BinaryExprWithCns; unsigned m_ArgCasted; unsigned m_ArgIsStructByValue; unsigned m_FldAccessOverArgStruct; unsigned m_FoldableBox; unsigned m_Intrinsic; unsigned m_BackwardJump; unsigned m_ThrowBlock; unsigned m_ArgIsExactCls; unsigned m_ArgIsExactClsSigIsNot; unsigned m_ArgIsConst; unsigned m_ArgIsBoxedAtCallsite; unsigned m_FoldableIntrinsic; unsigned m_FoldableExpr; unsigned m_FoldableExprUn; unsigned m_FoldableBranch; unsigned m_FoldableSwitch; unsigned m_Switch; unsigned m_DivByCns; bool m_ReturnsStructByValue : 1; bool m_IsFromValueClass : 1; bool m_NonGenericCallsGeneric : 1; bool m_IsCallsiteInNoReturnRegion : 1; bool m_HasProfile : 1; }; // DiscretionaryPolicy is a variant of the default policy. It // differs in that there is no ALWAYS_INLINE class, there is no IL // size limit, and in prejit mode, discretionary failures do not // propagate the "NEVER" inline bit to the runtime. // // It is useful for gathering data about inline costs. 
class DiscretionaryPolicy : public DefaultPolicy { public: // Construct a DiscretionaryPolicy DiscretionaryPolicy(Compiler* compiler, bool isPrejitRoot); // Policy observations void NoteBool(InlineObservation obs, bool value) override; void NoteInt(InlineObservation obs, int value) override; void NoteDouble(InlineObservation obs, double value) override; // Policy policies bool PropagateNeverToRuntime() const override; // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; // Policy estimates int CodeSizeEstimate() override; #if defined(DEBUG) || defined(INLINE_DATA) // Externalize data void DumpData(FILE* file) const override; void DumpSchema(FILE* file) const override; // Miscellaneous const char* GetName() const override { return "DiscretionaryPolicy"; } #endif // defined(DEBUG) || defined(INLINE_DATA) protected: void ComputeOpcodeBin(OPCODE opcode); void EstimateCodeSize(); void EstimatePerformanceImpact(); void MethodInfoObservations(CORINFO_METHOD_INFO* methodInfo); enum { MAX_ARGS = 6 }; double m_ProfileFrequency; unsigned m_BlockCount; unsigned m_Maxstack; unsigned m_ArgCount; CorInfoType m_ArgType[MAX_ARGS]; size_t m_ArgSize[MAX_ARGS]; unsigned m_LocalCount; CorInfoType m_ReturnType; size_t m_ReturnSize; unsigned m_ArgAccessCount; unsigned m_LocalAccessCount; unsigned m_IntConstantCount; unsigned m_FloatConstantCount; unsigned m_IntLoadCount; unsigned m_FloatLoadCount; unsigned m_IntStoreCount; unsigned m_FloatStoreCount; unsigned m_SimpleMathCount; unsigned m_ComplexMathCount; unsigned m_OverflowMathCount; unsigned m_IntArrayLoadCount; unsigned m_FloatArrayLoadCount; unsigned m_RefArrayLoadCount; unsigned m_StructArrayLoadCount; unsigned m_IntArrayStoreCount; unsigned m_FloatArrayStoreCount; unsigned m_RefArrayStoreCount; unsigned m_StructArrayStoreCount; unsigned m_StructOperationCount; unsigned m_ObjectModelCount; unsigned m_FieldLoadCount; unsigned m_FieldStoreCount; unsigned m_StaticFieldLoadCount; unsigned m_StaticFieldStoreCount; unsigned m_LoadAddressCount; unsigned m_ThrowCount; unsigned m_ReturnCount; unsigned m_CallCount; unsigned m_CallSiteWeight; int m_ModelCodeSizeEstimate; int m_PerCallInstructionEstimate; bool m_HasProfile; bool m_IsClassCtor; bool m_IsSameThis; bool m_CallerHasNewArray; bool m_CallerHasNewObj; bool m_CalleeHasGCStruct; }; // ModelPolicy is an experimental policy that uses the results // of data modelling to make estimates. class ModelPolicy : public DiscretionaryPolicy { public: // Construct a ModelPolicy ModelPolicy(Compiler* compiler, bool isPrejitRoot); // Policy observations void NoteInt(InlineObservation obs, int value) override; // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; // Policy policies bool PropagateNeverToRuntime() const override { return true; } #if defined(DEBUG) || defined(INLINE_DATA) // Miscellaneous const char* GetName() const override { return "ModelPolicy"; } #endif // defined(DEBUG) || defined(INLINE_DATA) }; // ProfilePolicy is an experimental policy that uses the results // of data modelling and profile feedback to make estimates. 
class ProfilePolicy : public DiscretionaryPolicy { public: // Construct a ProfilePolicy ProfilePolicy(Compiler* compiler, bool isPrejitRoot); // Policy observations void NoteInt(InlineObservation obs, int value) override; // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; #if defined(DEBUG) || defined(INLINE_DATA) // Miscellaneous const char* GetName() const override { return "ProfilePolicy"; } #endif // defined(DEBUG) || defined(INLINE_DATA) }; #if defined(DEBUG) || defined(INLINE_DATA) // RandomPolicy implements a policy that inlines at random. // It is mostly useful for stress testing. class RandomPolicy : public DiscretionaryPolicy { public: // Construct a RandomPolicy RandomPolicy(Compiler* compiler, bool isPrejitRoot); // Policy observations void NoteInt(InlineObservation obs, int value) override; // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; const char* GetName() const override { return "RandomPolicy"; } private: // Data members CLRRandom* m_Random; }; #endif // defined(DEBUG) || defined(INLINE_DATA) #if defined(DEBUG) || defined(INLINE_DATA) // FullPolicy is an experimental policy that will always inline if // possible, subject to externally settable depth and size limits. // // It's useful for uncovering the full set of possible inlines for // methods. class FullPolicy : public DiscretionaryPolicy { public: // Construct a FullPolicy FullPolicy(Compiler* compiler, bool isPrejitRoot); // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; bool BudgetCheck() const override; // Miscellaneous const char* GetName() const override { return "FullPolicy"; } }; // SizePolicy is an experimental policy that will inline as much // as possible without increasing the (estimated) method size. // // It may be useful down the road as a policy to use for methods // that are rarely executed (eg class constructors). class SizePolicy : public DiscretionaryPolicy { public: // Construct a SizePolicy SizePolicy(Compiler* compiler, bool isPrejitRoot); // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; // Miscellaneous const char* GetName() const override { return "SizePolicy"; } }; // The ReplayPolicy performs only inlines specified by an external // inline replay log. class ReplayPolicy : public DiscretionaryPolicy { public: // Construct a ReplayPolicy ReplayPolicy(Compiler* compiler, bool isPrejitRoot); // Policy observations void NoteBool(InlineObservation obs, bool value) override; // Optional observations void NoteContext(InlineContext* context) override { m_InlineContext = context; } void NoteOffset(IL_OFFSET offset) override { m_Offset = offset; } // Policy determinations void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override; // Miscellaneous const char* GetName() const override { return "ReplayPolicy"; } static void FinalizeXml(); private: bool FindMethod(); bool FindContext(InlineContext* context); bool FindInline(CORINFO_METHOD_HANDLE callee); bool FindInline(unsigned token, unsigned hash, unsigned offset); static bool s_WroteReplayBanner; static FILE* s_ReplayFile; static CritSecObject s_XmlReaderLock; InlineContext* m_InlineContext; IL_OFFSET m_Offset; bool m_WasForceInline; }; #endif // defined(DEBUG) || defined(INLINE_DATA) #endif // _INLINE_POLICY_H_
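The policy hierarchy declared above is extended by subclassing an existing policy and overriding its observation and profitability hooks, which is how ExtendedDefaultPolicy layers on DefaultPolicy. The sketch below illustrates that pattern and is not part of the header: "VerbosePolicy" is a hypothetical name, the surrounding jit headers are assumed to be in scope, and InlineCallsiteFrequency::HOT plus the base-class behaviour of DetermineMultiplier() are assumptions drawn from inline.h/inlinepolicy.cpp rather than from this excerpt; debug-only members such as GetName and OnDumpXml are omitted.

class VerbosePolicy : public DefaultPolicy
{
public:
    // "VerbosePolicy" is a hypothetical example, not a policy that exists in the jit.
    VerbosePolicy(Compiler* compiler, bool isPrejitRoot) : DefaultPolicy(compiler, isPrejitRoot)
    {
        // empty
    }

    // Observation hook: record nothing extra here, just defer to the default bookkeeping.
    void NoteInt(InlineObservation obs, int value) override
    {
        DefaultPolicy::NoteInt(obs, value);
    }

protected:
    // Start from the default benefit multiplier and nudge it for callsites the
    // heuristic has already classified as hot (InlineCallsiteFrequency::HOT is
    // assumed to exist in inline.h; only UNUSED is visible in this excerpt).
    double DetermineMultiplier() override
    {
        double multiplier = DefaultPolicy::DetermineMultiplier();
        if (m_CallsiteFrequency == InlineCallsiteFrequency::HOT)
        {
            multiplier += 1.0;
        }
        return multiplier;
    }
};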
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/native/eventpipe/ep-session-provider.h
#ifndef __EVENTPIPE_SESSION_PROVIDER_H__ #define __EVENTPIPE_SESSION_PROVIDER_H__ #ifdef ENABLE_PERFTRACING #include "ep-rt-config.h" #include "ep-types.h" #undef EP_IMPL_GETTER_SETTER #ifdef EP_IMPL_SESSION_PROVIDER_GETTER_SETTER #define EP_IMPL_GETTER_SETTER #endif #include "ep-getter-setter.h" /* * EventPipeSessionProvider. */ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_SESSION_PROVIDER_GETTER_SETTER) struct _EventPipeSessionProvider { #else struct _EventPipeSessionProvider_Internal { #endif ep_char8_t *provider_name; uint64_t keywords; EventPipeEventLevel logging_level; ep_char8_t *filter_data; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_SESSION_PROVIDER_GETTER_SETTER) struct _EventPipeSessionProvider { uint8_t _internal [sizeof (struct _EventPipeSessionProvider_Internal)]; }; #endif EP_DEFINE_GETTER(EventPipeSessionProvider *, session_provider, const ep_char8_t *, provider_name) EP_DEFINE_GETTER(EventPipeSessionProvider *, session_provider, uint64_t, keywords) EP_DEFINE_GETTER(EventPipeSessionProvider *, session_provider, EventPipeEventLevel, logging_level) EP_DEFINE_GETTER(EventPipeSessionProvider *, session_provider, const ep_char8_t *, filter_data) EventPipeSessionProvider * ep_session_provider_alloc ( const ep_char8_t *provider_name, uint64_t keywords, EventPipeEventLevel logging_level, const ep_char8_t *filter_data); void ep_session_provider_free (EventPipeSessionProvider * session_provider); /* * EventPipeSessionProviderList. */ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_SESSION_PROVIDER_GETTER_SETTER) struct _EventPipeSessionProviderList { #else struct _EventPipeSessionProviderList_Internal { #endif ep_rt_session_provider_list_t providers; EventPipeSessionProvider *catch_all_provider; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_SESSION_PROVIDER_GETTER_SETTER) struct _EventPipeSessionProviderList { uint8_t _internal [sizeof (struct _EventPipeSessionProviderList_Internal)]; }; #endif EP_DEFINE_GETTER_REF(EventPipeSessionProviderList *, session_provider_list, ep_rt_session_provider_list_t *, providers) EP_DEFINE_GETTER(EventPipeSessionProviderList *, session_provider_list, EventPipeSessionProvider *, catch_all_provider) EventPipeSessionProviderList * ep_session_provider_list_alloc ( const EventPipeProviderConfiguration *configs, uint32_t configs_len); void ep_session_provider_list_free (EventPipeSessionProviderList *session_provider_list); void ep_session_provider_list_clear (EventPipeSessionProviderList *session_provider_list); bool ep_session_provider_list_is_empty (const EventPipeSessionProviderList *session_provider_list); bool ep_session_provider_list_add_session_provider ( EventPipeSessionProviderList *session_provider_list, EventPipeSessionProvider *session_provider); #endif /* ENABLE_PERFTRACING */ #endif /** __EVENTPIPE_SESSION_PROVIDER_H__ **/
#ifndef __EVENTPIPE_SESSION_PROVIDER_H__ #define __EVENTPIPE_SESSION_PROVIDER_H__ #ifdef ENABLE_PERFTRACING #include "ep-rt-config.h" #include "ep-types.h" #undef EP_IMPL_GETTER_SETTER #ifdef EP_IMPL_SESSION_PROVIDER_GETTER_SETTER #define EP_IMPL_GETTER_SETTER #endif #include "ep-getter-setter.h" /* * EventPipeSessionProvider. */ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_SESSION_PROVIDER_GETTER_SETTER) struct _EventPipeSessionProvider { #else struct _EventPipeSessionProvider_Internal { #endif ep_char8_t *provider_name; uint64_t keywords; EventPipeEventLevel logging_level; ep_char8_t *filter_data; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_SESSION_PROVIDER_GETTER_SETTER) struct _EventPipeSessionProvider { uint8_t _internal [sizeof (struct _EventPipeSessionProvider_Internal)]; }; #endif EP_DEFINE_GETTER(EventPipeSessionProvider *, session_provider, const ep_char8_t *, provider_name) EP_DEFINE_GETTER(EventPipeSessionProvider *, session_provider, uint64_t, keywords) EP_DEFINE_GETTER(EventPipeSessionProvider *, session_provider, EventPipeEventLevel, logging_level) EP_DEFINE_GETTER(EventPipeSessionProvider *, session_provider, const ep_char8_t *, filter_data) EventPipeSessionProvider * ep_session_provider_alloc ( const ep_char8_t *provider_name, uint64_t keywords, EventPipeEventLevel logging_level, const ep_char8_t *filter_data); void ep_session_provider_free (EventPipeSessionProvider * session_provider); /* * EventPipeSessionProviderList. */ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_SESSION_PROVIDER_GETTER_SETTER) struct _EventPipeSessionProviderList { #else struct _EventPipeSessionProviderList_Internal { #endif ep_rt_session_provider_list_t providers; EventPipeSessionProvider *catch_all_provider; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_SESSION_PROVIDER_GETTER_SETTER) struct _EventPipeSessionProviderList { uint8_t _internal [sizeof (struct _EventPipeSessionProviderList_Internal)]; }; #endif EP_DEFINE_GETTER_REF(EventPipeSessionProviderList *, session_provider_list, ep_rt_session_provider_list_t *, providers) EP_DEFINE_GETTER(EventPipeSessionProviderList *, session_provider_list, EventPipeSessionProvider *, catch_all_provider) EventPipeSessionProviderList * ep_session_provider_list_alloc ( const EventPipeProviderConfiguration *configs, uint32_t configs_len); void ep_session_provider_list_free (EventPipeSessionProviderList *session_provider_list); void ep_session_provider_list_clear (EventPipeSessionProviderList *session_provider_list); bool ep_session_provider_list_is_empty (const EventPipeSessionProviderList *session_provider_list); bool ep_session_provider_list_add_session_provider ( EventPipeSessionProviderList *session_provider_list, EventPipeSessionProvider *session_provider); #endif /* ENABLE_PERFTRACING */ #endif /** __EVENTPIPE_SESSION_PROVIDER_H__ **/
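The header above hides the provider layout behind the EP_INLINE_GETTER_SETTER/_Internal pattern and exposes fields only through EP_DEFINE_GETTER-generated accessors. The fragment below is a hedged caller-side sketch, not part of the header: the accessor name ep_session_provider_get_keywords is the assumed expansion of EP_DEFINE_GETTER(..., uint64_t, keywords), EP_EVENT_LEVEL_INFORMATIONAL is assumed to be declared in ep-types.h, and the provider name and keyword mask are sample values.

#include "ep-session-provider.h"

// Illustrative caller only; allocate a provider description, read one field back
// through a generated accessor, then release it.
static void
sample_session_provider_roundtrip (void)
{
	EventPipeSessionProvider *provider = ep_session_provider_alloc (
		"MySampleProvider",           // provider_name (sample value)
		0xFF,                         // keywords (sample mask)
		EP_EVENT_LEVEL_INFORMATIONAL, // logging_level (assumed enum value)
		NULL);                        // filter_data (none)

	if (provider != NULL) {
		uint64_t mask = ep_session_provider_get_keywords (provider);
		(void)mask; // a real session would match events against this mask
		ep_session_provider_free (provider);
	}
}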
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/native/corehost/hostfxr.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __HOSTFXR_H__ #define __HOSTFXR_H__ #include <stddef.h> #include <stdint.h> #if defined(_WIN32) #define HOSTFXR_CALLTYPE __cdecl #ifdef _WCHAR_T_DEFINED typedef wchar_t char_t; #else typedef unsigned short char_t; #endif #else #define HOSTFXR_CALLTYPE typedef char char_t; #endif enum hostfxr_delegate_type { hdt_com_activation, hdt_load_in_memory_assembly, hdt_winrt_activation, hdt_com_register, hdt_com_unregister, hdt_load_assembly_and_get_function_pointer, hdt_get_function_pointer, }; typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_main_fn)(const int argc, const char_t **argv); typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_main_startupinfo_fn)( const int argc, const char_t **argv, const char_t *host_path, const char_t *dotnet_root, const char_t *app_path); typedef int32_t(HOSTFXR_CALLTYPE* hostfxr_main_bundle_startupinfo_fn)( const int argc, const char_t** argv, const char_t* host_path, const char_t* dotnet_root, const char_t* app_path, int64_t bundle_header_offset); typedef void(HOSTFXR_CALLTYPE *hostfxr_error_writer_fn)(const char_t *message); // // Sets a callback which is to be used to write errors to. // // Parameters: // error_writer // A callback function which will be invoked every time an error is to be reported. // Or nullptr to unregister previously registered callback and return to the default behavior. // Return value: // The previously registered callback (which is now unregistered), or nullptr if no previous callback // was registered // // The error writer is registered per-thread, so the registration is thread-local. On each thread // only one callback can be registered. Subsequent registrations overwrite the previous ones. // // By default no callback is registered in which case the errors are written to stderr. // // Each call to the error writer is sort of like writing a single line (the EOL character is omitted). // Multiple calls to the error writer may occure for one failure. // // If the hostfxr invokes functions in hostpolicy as part of its operation, the error writer // will be propagated to hostpolicy for the duration of the call. This means that errors from // both hostfxr and hostpolicy will be reporter through the same error writer. // typedef hostfxr_error_writer_fn(HOSTFXR_CALLTYPE *hostfxr_set_error_writer_fn)(hostfxr_error_writer_fn error_writer); typedef void* hostfxr_handle; struct hostfxr_initialize_parameters { size_t size; const char_t *host_path; const char_t *dotnet_root; }; // // Initializes the hosting components for a dotnet command line running an application // // Parameters: // argc // Number of argv arguments // argv // Command-line arguments for running an application (as if through the dotnet executable). // Only command-line arguments which are accepted by runtime installation are supported, SDK/CLI commands are not supported. // For example 'app.dll app_argument_1 app_argument_2`. // parameters // Optional. Additional parameters for initialization // host_context_handle // On success, this will be populated with an opaque value representing the initialized host context // // Return value: // Success - Hosting components were successfully initialized // HostInvalidState - Hosting components are already initialized // // This function parses the specified command-line arguments to determine the application to run. 
It will // then find the corresponding .runtimeconfig.json and .deps.json with which to resolve frameworks and // dependencies and prepare everything needed to load the runtime. // // This function only supports arguments for running an application. It does not support SDK commands. // // This function does not load the runtime. // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_initialize_for_dotnet_command_line_fn)( int argc, const char_t **argv, const struct hostfxr_initialize_parameters *parameters, /*out*/ hostfxr_handle *host_context_handle); // // Initializes the hosting components using a .runtimeconfig.json file // // Parameters: // runtime_config_path // Path to the .runtimeconfig.json file // parameters // Optional. Additional parameters for initialization // host_context_handle // On success, this will be populated with an opaque value representing the initialized host context // // Return value: // Success - Hosting components were successfully initialized // Success_HostAlreadyInitialized - Config is compatible with already initialized hosting components // Success_DifferentRuntimeProperties - Config has runtime properties that differ from already initialized hosting components // CoreHostIncompatibleConfig - Config is incompatible with already initialized hosting components // // This function will process the .runtimeconfig.json to resolve frameworks and prepare everything needed // to load the runtime. It will only process the .deps.json from frameworks (not any app/component that // may be next to the .runtimeconfig.json). // // This function does not load the runtime. // // If called when the runtime has already been loaded, this function will check if the specified runtime // config is compatible with the existing runtime. // // Both Success_HostAlreadyInitialized and Success_DifferentRuntimeProperties codes are considered successful // initializations. In the case of Success_DifferentRuntimeProperties, it is left to the consumer to verify that // the difference in properties is acceptable. // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_initialize_for_runtime_config_fn)( const char_t *runtime_config_path, const struct hostfxr_initialize_parameters *parameters, /*out*/ hostfxr_handle *host_context_handle); // // Gets the runtime property value for an initialized host context // // Parameters: // host_context_handle // Handle to the initialized host context // name // Runtime property name // value // Out parameter. Pointer to a buffer with the property value. // // Return value: // The error code result. // // The buffer pointed to by value is owned by the host context. The lifetime of the buffer is only // guaranteed until any of the below occur: // - a 'run' method is called for the host context // - properties are changed via hostfxr_set_runtime_property_value // - the host context is closed via 'hostfxr_close' // // If host_context_handle is nullptr and an active host context exists, this function will get the // property value for the active host context. // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_get_runtime_property_value_fn)( const hostfxr_handle host_context_handle, const char_t *name, /*out*/ const char_t **value); // // Sets the value of a runtime property for an initialized host context // // Parameters: // host_context_handle // Handle to the initialized host context // name // Runtime property name // value // Value to set // // Return value: // The error code result. 
// // Setting properties is only supported for the first host context, before the runtime has been loaded. // // If the property already exists in the host context, it will be overwritten. If value is nullptr, the // property will be removed. // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_set_runtime_property_value_fn)( const hostfxr_handle host_context_handle, const char_t *name, const char_t *value); // // Gets all the runtime properties for an initialized host context // // Parameters: // host_context_handle // Handle to the initialized host context // count // [in] Size of the keys and values buffers // [out] Number of properties returned (size of keys/values buffers used). If the input value is too // small or keys/values is nullptr, this is populated with the number of available properties // keys // Array of pointers to buffers with runtime property keys // values // Array of pointers to buffers with runtime property values // // Return value: // The error code result. // // The buffers pointed to by keys and values are owned by the host context. The lifetime of the buffers is only // guaranteed until any of the below occur: // - a 'run' method is called for the host context // - properties are changed via hostfxr_set_runtime_property_value // - the host context is closed via 'hostfxr_close' // // If host_context_handle is nullptr and an active host context exists, this function will get the // properties for the active host context. // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_get_runtime_properties_fn)( const hostfxr_handle host_context_handle, /*inout*/ size_t * count, /*out*/ const char_t **keys, /*out*/ const char_t **values); // // Load CoreCLR and run the application for an initialized host context // // Parameters: // host_context_handle // Handle to the initialized host context // // Return value: // If the app was successfully run, the exit code of the application. Otherwise, the error code result. // // The host_context_handle must have been initialized using hostfxr_initialize_for_dotnet_command_line. // // This function will not return until the managed application exits. // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_run_app_fn)(const hostfxr_handle host_context_handle); // // Gets a typed delegate from the currently loaded CoreCLR or from a newly created one. // // Parameters: // host_context_handle // Handle to the initialized host context // type // Type of runtime delegate requested // delegate // An out parameter that will be assigned the delegate. // // Return value: // The error code result. // // If the host_context_handle was initialized using hostfxr_initialize_for_runtime_config, // then all delegate types are supported. // If the host_context_handle was initialized using hostfxr_initialize_for_dotnet_command_line, // then only the following delegate types are currently supported: // hdt_load_assembly_and_get_function_pointer // hdt_get_function_pointer // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_get_runtime_delegate_fn)( const hostfxr_handle host_context_handle, enum hostfxr_delegate_type type, /*out*/ void **delegate); // // Closes an initialized host context // // Parameters: // host_context_handle // Handle to the initialized host context // // Return value: // The error code result. 
// typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_close_fn)(const hostfxr_handle host_context_handle); struct hostfxr_dotnet_environment_sdk_info { size_t size; const char_t* version; const char_t* path; }; typedef void(HOSTFXR_CALLTYPE* hostfxr_get_dotnet_environment_info_result_fn)( const struct hostfxr_dotnet_environment_info* info, void* result_context); struct hostfxr_dotnet_environment_framework_info { size_t size; const char_t* name; const char_t* version; const char_t* path; }; struct hostfxr_dotnet_environment_info { size_t size; const char_t* hostfxr_version; const char_t* hostfxr_commit_hash; size_t sdk_count; const hostfxr_dotnet_environment_sdk_info* sdks; size_t framework_count; const hostfxr_dotnet_environment_framework_info* frameworks; }; #endif //__HOSTFXR_H__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __HOSTFXR_H__ #define __HOSTFXR_H__ #include <stddef.h> #include <stdint.h> #if defined(_WIN32) #define HOSTFXR_CALLTYPE __cdecl #ifdef _WCHAR_T_DEFINED typedef wchar_t char_t; #else typedef unsigned short char_t; #endif #else #define HOSTFXR_CALLTYPE typedef char char_t; #endif enum hostfxr_delegate_type { hdt_com_activation, hdt_load_in_memory_assembly, hdt_winrt_activation, hdt_com_register, hdt_com_unregister, hdt_load_assembly_and_get_function_pointer, hdt_get_function_pointer, }; typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_main_fn)(const int argc, const char_t **argv); typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_main_startupinfo_fn)( const int argc, const char_t **argv, const char_t *host_path, const char_t *dotnet_root, const char_t *app_path); typedef int32_t(HOSTFXR_CALLTYPE* hostfxr_main_bundle_startupinfo_fn)( const int argc, const char_t** argv, const char_t* host_path, const char_t* dotnet_root, const char_t* app_path, int64_t bundle_header_offset); typedef void(HOSTFXR_CALLTYPE *hostfxr_error_writer_fn)(const char_t *message); // // Sets a callback which is to be used to write errors to. // // Parameters: // error_writer // A callback function which will be invoked every time an error is to be reported. // Or nullptr to unregister previously registered callback and return to the default behavior. // Return value: // The previously registered callback (which is now unregistered), or nullptr if no previous callback // was registered // // The error writer is registered per-thread, so the registration is thread-local. On each thread // only one callback can be registered. Subsequent registrations overwrite the previous ones. // // By default no callback is registered in which case the errors are written to stderr. // // Each call to the error writer is sort of like writing a single line (the EOL character is omitted). // Multiple calls to the error writer may occure for one failure. // // If the hostfxr invokes functions in hostpolicy as part of its operation, the error writer // will be propagated to hostpolicy for the duration of the call. This means that errors from // both hostfxr and hostpolicy will be reporter through the same error writer. // typedef hostfxr_error_writer_fn(HOSTFXR_CALLTYPE *hostfxr_set_error_writer_fn)(hostfxr_error_writer_fn error_writer); typedef void* hostfxr_handle; struct hostfxr_initialize_parameters { size_t size; const char_t *host_path; const char_t *dotnet_root; }; // // Initializes the hosting components for a dotnet command line running an application // // Parameters: // argc // Number of argv arguments // argv // Command-line arguments for running an application (as if through the dotnet executable). // Only command-line arguments which are accepted by runtime installation are supported, SDK/CLI commands are not supported. // For example 'app.dll app_argument_1 app_argument_2`. // parameters // Optional. Additional parameters for initialization // host_context_handle // On success, this will be populated with an opaque value representing the initialized host context // // Return value: // Success - Hosting components were successfully initialized // HostInvalidState - Hosting components are already initialized // // This function parses the specified command-line arguments to determine the application to run. 
It will // then find the corresponding .runtimeconfig.json and .deps.json with which to resolve frameworks and // dependencies and prepare everything needed to load the runtime. // // This function only supports arguments for running an application. It does not support SDK commands. // // This function does not load the runtime. // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_initialize_for_dotnet_command_line_fn)( int argc, const char_t **argv, const struct hostfxr_initialize_parameters *parameters, /*out*/ hostfxr_handle *host_context_handle); // // Initializes the hosting components using a .runtimeconfig.json file // // Parameters: // runtime_config_path // Path to the .runtimeconfig.json file // parameters // Optional. Additional parameters for initialization // host_context_handle // On success, this will be populated with an opaque value representing the initialized host context // // Return value: // Success - Hosting components were successfully initialized // Success_HostAlreadyInitialized - Config is compatible with already initialized hosting components // Success_DifferentRuntimeProperties - Config has runtime properties that differ from already initialized hosting components // CoreHostIncompatibleConfig - Config is incompatible with already initialized hosting components // // This function will process the .runtimeconfig.json to resolve frameworks and prepare everything needed // to load the runtime. It will only process the .deps.json from frameworks (not any app/component that // may be next to the .runtimeconfig.json). // // This function does not load the runtime. // // If called when the runtime has already been loaded, this function will check if the specified runtime // config is compatible with the existing runtime. // // Both Success_HostAlreadyInitialized and Success_DifferentRuntimeProperties codes are considered successful // initializations. In the case of Success_DifferentRuntimeProperties, it is left to the consumer to verify that // the difference in properties is acceptable. // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_initialize_for_runtime_config_fn)( const char_t *runtime_config_path, const struct hostfxr_initialize_parameters *parameters, /*out*/ hostfxr_handle *host_context_handle); // // Gets the runtime property value for an initialized host context // // Parameters: // host_context_handle // Handle to the initialized host context // name // Runtime property name // value // Out parameter. Pointer to a buffer with the property value. // // Return value: // The error code result. // // The buffer pointed to by value is owned by the host context. The lifetime of the buffer is only // guaranteed until any of the below occur: // - a 'run' method is called for the host context // - properties are changed via hostfxr_set_runtime_property_value // - the host context is closed via 'hostfxr_close' // // If host_context_handle is nullptr and an active host context exists, this function will get the // property value for the active host context. // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_get_runtime_property_value_fn)( const hostfxr_handle host_context_handle, const char_t *name, /*out*/ const char_t **value); // // Sets the value of a runtime property for an initialized host context // // Parameters: // host_context_handle // Handle to the initialized host context // name // Runtime property name // value // Value to set // // Return value: // The error code result. 
// // Setting properties is only supported for the first host context, before the runtime has been loaded. // // If the property already exists in the host context, it will be overwritten. If value is nullptr, the // property will be removed. // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_set_runtime_property_value_fn)( const hostfxr_handle host_context_handle, const char_t *name, const char_t *value); // // Gets all the runtime properties for an initialized host context // // Parameters: // host_context_handle // Handle to the initialized host context // count // [in] Size of the keys and values buffers // [out] Number of properties returned (size of keys/values buffers used). If the input value is too // small or keys/values is nullptr, this is populated with the number of available properties // keys // Array of pointers to buffers with runtime property keys // values // Array of pointers to buffers with runtime property values // // Return value: // The error code result. // // The buffers pointed to by keys and values are owned by the host context. The lifetime of the buffers is only // guaranteed until any of the below occur: // - a 'run' method is called for the host context // - properties are changed via hostfxr_set_runtime_property_value // - the host context is closed via 'hostfxr_close' // // If host_context_handle is nullptr and an active host context exists, this function will get the // properties for the active host context. // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_get_runtime_properties_fn)( const hostfxr_handle host_context_handle, /*inout*/ size_t * count, /*out*/ const char_t **keys, /*out*/ const char_t **values); // // Load CoreCLR and run the application for an initialized host context // // Parameters: // host_context_handle // Handle to the initialized host context // // Return value: // If the app was successfully run, the exit code of the application. Otherwise, the error code result. // // The host_context_handle must have been initialized using hostfxr_initialize_for_dotnet_command_line. // // This function will not return until the managed application exits. // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_run_app_fn)(const hostfxr_handle host_context_handle); // // Gets a typed delegate from the currently loaded CoreCLR or from a newly created one. // // Parameters: // host_context_handle // Handle to the initialized host context // type // Type of runtime delegate requested // delegate // An out parameter that will be assigned the delegate. // // Return value: // The error code result. // // If the host_context_handle was initialized using hostfxr_initialize_for_runtime_config, // then all delegate types are supported. // If the host_context_handle was initialized using hostfxr_initialize_for_dotnet_command_line, // then only the following delegate types are currently supported: // hdt_load_assembly_and_get_function_pointer // hdt_get_function_pointer // typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_get_runtime_delegate_fn)( const hostfxr_handle host_context_handle, enum hostfxr_delegate_type type, /*out*/ void **delegate); // // Closes an initialized host context // // Parameters: // host_context_handle // Handle to the initialized host context // // Return value: // The error code result. 
// typedef int32_t(HOSTFXR_CALLTYPE *hostfxr_close_fn)(const hostfxr_handle host_context_handle); struct hostfxr_dotnet_environment_sdk_info { size_t size; const char_t* version; const char_t* path; }; typedef void(HOSTFXR_CALLTYPE* hostfxr_get_dotnet_environment_info_result_fn)( const struct hostfxr_dotnet_environment_info* info, void* result_context); struct hostfxr_dotnet_environment_framework_info { size_t size; const char_t* name; const char_t* version; const char_t* path; }; struct hostfxr_dotnet_environment_info { size_t size; const char_t* hostfxr_version; const char_t* hostfxr_commit_hash; size_t sdk_count; const hostfxr_dotnet_environment_sdk_info* sdks; size_t framework_count; const hostfxr_dotnet_environment_framework_info* frameworks; }; #endif //__HOSTFXR_H__
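Taken together, the typedefs above imply the usual embedding flow: initialize a host context from a .runtimeconfig.json, request a runtime delegate, and close the context. The sketch below is illustrative only: how libhostfxr is located and loaded (dlopen and the "libhostfxr.so" name) and the exported symbol names are assumptions about the host environment rather than part of this header, and for brevity it treats only a zero return code as success even though the comments above note that some non-zero codes (for example Success_HostAlreadyInitialized) also indicate success.

#include <dlfcn.h>
#include <cstdio>
#include "hostfxr.h" // illustrative include path

static int load_runtime_and_get_delegate(const char_t *runtime_config_path)
{
    // Loading mechanism and library name are assumptions about the host environment.
    void *lib = dlopen("libhostfxr.so", RTLD_LAZY);
    if (lib == nullptr)
        return -1;

    auto init      = (hostfxr_initialize_for_runtime_config_fn)dlsym(lib, "hostfxr_initialize_for_runtime_config");
    auto get_deleg = (hostfxr_get_runtime_delegate_fn)dlsym(lib, "hostfxr_get_runtime_delegate");
    auto close_fxr = (hostfxr_close_fn)dlsym(lib, "hostfxr_close");
    if (init == nullptr || get_deleg == nullptr || close_fxr == nullptr)
        return -1;

    hostfxr_handle ctx = nullptr;
    int rc = init(runtime_config_path, nullptr /* default initialize parameters */, &ctx);
    if (rc != 0 || ctx == nullptr) // for brevity, only 0 is treated as success here
    {
        std::fprintf(stderr, "hostfxr_initialize_for_runtime_config failed: 0x%08x\n", (unsigned)rc);
        if (ctx != nullptr)
            close_fxr(ctx);
        return rc;
    }

    // Ask the runtime for the delegate that loads an assembly and resolves a managed entry point.
    void *load_assembly_and_get_function_pointer = nullptr;
    rc = get_deleg(ctx, hdt_load_assembly_and_get_function_pointer, &load_assembly_and_get_function_pointer);

    close_fxr(ctx);
    return rc;
}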
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/debug/ee/s390x/primitives.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include "stdafx.h" #include "threads.h" #include "../../shared/s390x/primitives.cpp"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include "stdafx.h" #include "threads.h" #include "../../shared/s390x/primitives.cpp"
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/pal/tests/palsuite/filemapping_memmgt/VirtualProtect/test6/VirtualProtect.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================= ** ** Source: virtualprotect.c ** ** Purpose: Positive test the VirtualProtect API. ** Call VirtualProtect to set new protect as ** PAGE_NOACCESS ** ** **============================================================*/ #include <palsuite.h> #define REGIONSIZE 1024 PALTEST(filemapping_memmgt_VirtualProtect_test6_paltest_virtualprotect_test6, "filemapping_memmgt/VirtualProtect/test6/paltest_virtualprotect_test6") { int err; LPVOID lpVirtualAddress; DWORD OldProtect; //Initialize the PAL environment err = PAL_Initialize(argc, argv); if(0 != err) { ExitProcess(FAIL); } //Allocate the physical storage in memory or in the paging file on disk lpVirtualAddress = VirtualAlloc(NULL,//determine where to allocate the region REGIONSIZE, //specify the size MEM_COMMIT, //allocation type PAGE_READONLY); //access protection if(NULL == lpVirtualAddress) { Fail("\nFailed to call VirtualAlloc API!\n"); } OldProtect = PAGE_READONLY; //Set new access protection err = VirtualProtect(lpVirtualAddress, REGIONSIZE, //specify the region size PAGE_NOACCESS,//desired access protection &OldProtect);//old access protection if(0 == err) { Trace("\nFailed to call VirtualProtect API!\n"); err = VirtualFree(lpVirtualAddress,REGIONSIZE,MEM_DECOMMIT); if(0 == err) { Fail("\nFailed to call VirtualFree API!\n"); } Fail(""); } //decommit the specified region err = VirtualFree(lpVirtualAddress,REGIONSIZE,MEM_DECOMMIT); if(0 == err) { Fail("\nFailed to call VirtualFree API!\n"); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================= ** ** Source: virtualprotect.c ** ** Purpose: Positive test the VirtualProtect API. ** Call VirtualProtect to set new protect as ** PAGE_NOACCESS ** ** **============================================================*/ #include <palsuite.h> #define REGIONSIZE 1024 PALTEST(filemapping_memmgt_VirtualProtect_test6_paltest_virtualprotect_test6, "filemapping_memmgt/VirtualProtect/test6/paltest_virtualprotect_test6") { int err; LPVOID lpVirtualAddress; DWORD OldProtect; //Initialize the PAL environment err = PAL_Initialize(argc, argv); if(0 != err) { ExitProcess(FAIL); } //Allocate the physical storage in memory or in the paging file on disk lpVirtualAddress = VirtualAlloc(NULL,//determine where to allocate the region REGIONSIZE, //specify the size MEM_COMMIT, //allocation type PAGE_READONLY); //access protection if(NULL == lpVirtualAddress) { Fail("\nFailed to call VirtualAlloc API!\n"); } OldProtect = PAGE_READONLY; //Set new access protection err = VirtualProtect(lpVirtualAddress, REGIONSIZE, //specify the region size PAGE_NOACCESS,//desired access protection &OldProtect);//old access protection if(0 == err) { Trace("\nFailed to call VirtualProtect API!\n"); err = VirtualFree(lpVirtualAddress,REGIONSIZE,MEM_DECOMMIT); if(0 == err) { Fail("\nFailed to call VirtualFree API!\n"); } Fail(""); } //decommit the specified region err = VirtualFree(lpVirtualAddress,REGIONSIZE,MEM_DECOMMIT); if(0 == err) { Fail("\nFailed to call VirtualFree API!\n"); } PAL_Terminate(); return PASS; }
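As a companion to the test above, the sketch below shows the usual tighten-then-restore pattern with the same three APIs: commit a read/write region, drop it to read-only, and put back the protection reported through the old-protection out parameter before freeing. It reuses the REGIONSIZE constant from the test, trims error handling to early returns, and is illustrative rather than an additional PAL test case.

static bool TightenAndRestoreProtection()
{
    LPVOID page = VirtualAlloc(NULL, REGIONSIZE, MEM_COMMIT, PAGE_READWRITE);
    if (page == NULL)
        return false;

    DWORD oldProtect = 0;
    // Drop the region to read-only; the previous protection comes back in oldProtect.
    if (!VirtualProtect(page, REGIONSIZE, PAGE_READONLY, &oldProtect))
    {
        VirtualFree(page, REGIONSIZE, MEM_DECOMMIT);
        return false;
    }

    // oldProtect now holds PAGE_READWRITE; restore it before releasing the region.
    bool restored = VirtualProtect(page, REGIONSIZE, oldProtect, &oldProtect) != 0;
    VirtualFree(page, REGIONSIZE, MEM_DECOMMIT);
    return restored;
}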
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/pal/tests/palsuite/debug_api/WriteProcessMemory/test3/test3.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================= ** ** Source: test3.c ** ** Purpose: Create a child process and debug it. When the child ** raises an exception, it sends back a memory location. Call ** WriteProcessMemory on the memory location, but attempt to write ** more than the memory allows. This should cause an error and the ** data should be unchanged. ** ** ==============================================================*/ #define UNICODE #include "commonconsts.h" #include <palsuite.h> PALTEST(debug_api_WriteProcessMemory_test3_paltest_writeprocessmemory_test3, "debug_api/WriteProcessMemory/test3/paltest_writeprocessmemory_test3") { PROCESS_INFORMATION pi; STARTUPINFO si; HANDLE hEvToHelper; HANDLE hEvFromHelper; DWORD dwExitCode; DWORD dwRet; BOOL success = TRUE; /* assume success */ char cmdComposeBuf[MAX_PATH]; PWCHAR uniString; if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } /* Create the signals we need for cross process communication */ hEvToHelper = CreateEvent(NULL, TRUE, FALSE, szcToHelperEvName); if (!hEvToHelper) { Fail("WriteProcessMemory: CreateEvent of '%S' failed. " "GetLastError() returned %u.\n", szcToHelperEvName, GetLastError()); } if (GetLastError() == ERROR_ALREADY_EXISTS) { Fail("WriteProcessMemory: CreateEvent of '%S' failed. " "(already exists!)\n", szcToHelperEvName); } hEvFromHelper = CreateEvent(NULL, TRUE, FALSE, szcFromHelperEvName); if (!hEvToHelper) { Fail("WriteProcessMemory: CreateEvent of '%S' failed. " "GetLastError() returned %u.\n", szcFromHelperEvName, GetLastError()); } if (GetLastError() == ERROR_ALREADY_EXISTS) { Fail("WriteProcessMemory: CreateEvent of '%S' failed. " "(already exists!)\n", szcFromHelperEvName); } if (!sprintf_s(cmdComposeBuf, ARRAY_SIZE(cmdComposeBuf), "helper %s", commsFileName)) { Fail("Could not convert command line\n"); } uniString = convert(cmdComposeBuf); ZeroMemory( &si, sizeof(si) ); si.cb = sizeof(si); ZeroMemory( &pi, sizeof(pi) ); /* Create a new process. This is the process that will ask for * memory munging */ if(!CreateProcess( NULL, uniString, NULL, NULL, FALSE, 0, NULL, NULL, &si, &pi)) { Trace("ERROR: CreateProcess failed to load executable '%S'. 
" "GetLastError() returned %u.\n", uniString, GetLastError()); free(uniString); Fail(""); } free(uniString); while(1) { FILE *commsFile; char* pSrcMemory; char* pDestMemory; int Count; SIZE_T wpmCount; DWORD dwExpectedErrorCode; char incomingCMDBuffer[MAX_PATH + 1]; /* wait until the helper tells us that it has given us * something to do */ dwRet = WaitForSingleObject(hEvFromHelper, TIMEOUT); if (dwRet != WAIT_OBJECT_0) { Trace("test1 WaitForSingleObjectTest: WaitForSingleObject " "failed (%u)\n", GetLastError()); break; /* no more work incoming */ } /* get the parameters to test WriteProcessMemory with */ if (!(commsFile = fopen(commsFileName, "r"))) { /* no file means there is no more work */ break; } if ( NULL == fgets(incomingCMDBuffer, MAX_PATH, commsFile)) { Trace ("unable to read from communication file %s " "for reasons %u & %u\n", errno, GetLastError()); success = FALSE; PEDANTIC1(fclose,(commsFile)); /* it's not worth continuing this trial */ goto doneIteration; } PEDANTIC1(fclose,(commsFile)); sscanf(incomingCMDBuffer, "%u %u %u", &pDestMemory, &Count, &dwExpectedErrorCode); if (argc > 1) { Trace("Preparing to write to %u bytes @ %u ('%s')\n", Count, pDestMemory, incomingCMDBuffer); } /* compose some data to write to the client process */ if (!(pSrcMemory = (char*)malloc(Count))) { Trace("could not dynamically allocate memory to copy from " "for reasons %u & %u\n", errno, GetLastError()); success = FALSE; goto doneIteration; } memset(pSrcMemory, nextValue, Count); /* do the work */ dwRet = WriteProcessMemory(pi.hProcess, pDestMemory, pSrcMemory, Count, &wpmCount); if(dwRet != 0) { Trace("ERROR: Situation: '%s', return code: %u, bytes 'written': %u\n", incomingCMDBuffer, dwRet, wpmCount); Trace("ERROR: WriteProcessMemory did not fail as it should, as " "it attempted to write to a range of memory which was " "not completely accessible.\n"); success = FALSE; } if(GetLastError() != dwExpectedErrorCode) { Trace("ERROR: GetLastError() should have returned " "%u , but instead it returned %u.\n", dwExpectedErrorCode, GetLastError()); success = FALSE; } free(pSrcMemory); doneIteration: PEDANTIC(ResetEvent, (hEvFromHelper)); PEDANTIC(SetEvent, (hEvToHelper)); } /* wait for the child process to complete */ WaitForSingleObject ( pi.hProcess, TIMEOUT ); /* this may return a failure code on a success path */ /* check the exit code from the process */ if( ! GetExitCodeProcess( pi.hProcess, &dwExitCode ) ) { Trace( "GetExitCodeProcess call failed with error code %u\n", GetLastError() ); dwExitCode = FAIL; } if(!success) { dwExitCode = FAIL; } PEDANTIC(CloseHandle, (hEvToHelper)); PEDANTIC(CloseHandle, (hEvFromHelper)); PEDANTIC(CloseHandle, (pi.hThread)); PEDANTIC(CloseHandle, (pi.hProcess)); PAL_TerminateEx(dwExitCode); return dwExitCode; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================= ** ** Source: test3.c ** ** Purpose: Create a child process and debug it. When the child ** raises an exception, it sends back a memory location. Call ** WriteProcessMemory on the memory location, but attempt to write ** more than the memory allows. This should cause an error and the ** data should be unchanged. ** ** ==============================================================*/ #define UNICODE #include "commonconsts.h" #include <palsuite.h> PALTEST(debug_api_WriteProcessMemory_test3_paltest_writeprocessmemory_test3, "debug_api/WriteProcessMemory/test3/paltest_writeprocessmemory_test3") { PROCESS_INFORMATION pi; STARTUPINFO si; HANDLE hEvToHelper; HANDLE hEvFromHelper; DWORD dwExitCode; DWORD dwRet; BOOL success = TRUE; /* assume success */ char cmdComposeBuf[MAX_PATH]; PWCHAR uniString; if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } /* Create the signals we need for cross process communication */ hEvToHelper = CreateEvent(NULL, TRUE, FALSE, szcToHelperEvName); if (!hEvToHelper) { Fail("WriteProcessMemory: CreateEvent of '%S' failed. " "GetLastError() returned %u.\n", szcToHelperEvName, GetLastError()); } if (GetLastError() == ERROR_ALREADY_EXISTS) { Fail("WriteProcessMemory: CreateEvent of '%S' failed. " "(already exists!)\n", szcToHelperEvName); } hEvFromHelper = CreateEvent(NULL, TRUE, FALSE, szcFromHelperEvName); if (!hEvToHelper) { Fail("WriteProcessMemory: CreateEvent of '%S' failed. " "GetLastError() returned %u.\n", szcFromHelperEvName, GetLastError()); } if (GetLastError() == ERROR_ALREADY_EXISTS) { Fail("WriteProcessMemory: CreateEvent of '%S' failed. " "(already exists!)\n", szcFromHelperEvName); } if (!sprintf_s(cmdComposeBuf, ARRAY_SIZE(cmdComposeBuf), "helper %s", commsFileName)) { Fail("Could not convert command line\n"); } uniString = convert(cmdComposeBuf); ZeroMemory( &si, sizeof(si) ); si.cb = sizeof(si); ZeroMemory( &pi, sizeof(pi) ); /* Create a new process. This is the process that will ask for * memory munging */ if(!CreateProcess( NULL, uniString, NULL, NULL, FALSE, 0, NULL, NULL, &si, &pi)) { Trace("ERROR: CreateProcess failed to load executable '%S'. 
" "GetLastError() returned %u.\n", uniString, GetLastError()); free(uniString); Fail(""); } free(uniString); while(1) { FILE *commsFile; char* pSrcMemory; char* pDestMemory; int Count; SIZE_T wpmCount; DWORD dwExpectedErrorCode; char incomingCMDBuffer[MAX_PATH + 1]; /* wait until the helper tells us that it has given us * something to do */ dwRet = WaitForSingleObject(hEvFromHelper, TIMEOUT); if (dwRet != WAIT_OBJECT_0) { Trace("test1 WaitForSingleObjectTest: WaitForSingleObject " "failed (%u)\n", GetLastError()); break; /* no more work incoming */ } /* get the parameters to test WriteProcessMemory with */ if (!(commsFile = fopen(commsFileName, "r"))) { /* no file means there is no more work */ break; } if ( NULL == fgets(incomingCMDBuffer, MAX_PATH, commsFile)) { Trace ("unable to read from communication file %s " "for reasons %u & %u\n", errno, GetLastError()); success = FALSE; PEDANTIC1(fclose,(commsFile)); /* it's not worth continuing this trial */ goto doneIteration; } PEDANTIC1(fclose,(commsFile)); sscanf(incomingCMDBuffer, "%u %u %u", &pDestMemory, &Count, &dwExpectedErrorCode); if (argc > 1) { Trace("Preparing to write to %u bytes @ %u ('%s')\n", Count, pDestMemory, incomingCMDBuffer); } /* compose some data to write to the client process */ if (!(pSrcMemory = (char*)malloc(Count))) { Trace("could not dynamically allocate memory to copy from " "for reasons %u & %u\n", errno, GetLastError()); success = FALSE; goto doneIteration; } memset(pSrcMemory, nextValue, Count); /* do the work */ dwRet = WriteProcessMemory(pi.hProcess, pDestMemory, pSrcMemory, Count, &wpmCount); if(dwRet != 0) { Trace("ERROR: Situation: '%s', return code: %u, bytes 'written': %u\n", incomingCMDBuffer, dwRet, wpmCount); Trace("ERROR: WriteProcessMemory did not fail as it should, as " "it attempted to write to a range of memory which was " "not completely accessible.\n"); success = FALSE; } if(GetLastError() != dwExpectedErrorCode) { Trace("ERROR: GetLastError() should have returned " "%u , but instead it returned %u.\n", dwExpectedErrorCode, GetLastError()); success = FALSE; } free(pSrcMemory); doneIteration: PEDANTIC(ResetEvent, (hEvFromHelper)); PEDANTIC(SetEvent, (hEvToHelper)); } /* wait for the child process to complete */ WaitForSingleObject ( pi.hProcess, TIMEOUT ); /* this may return a failure code on a success path */ /* check the exit code from the process */ if( ! GetExitCodeProcess( pi.hProcess, &dwExitCode ) ) { Trace( "GetExitCodeProcess call failed with error code %u\n", GetLastError() ); dwExitCode = FAIL; } if(!success) { dwExitCode = FAIL; } PEDANTIC(CloseHandle, (hEvToHelper)); PEDANTIC(CloseHandle, (hEvFromHelper)); PEDANTIC(CloseHandle, (pi.hThread)); PEDANTIC(CloseHandle, (pi.hProcess)); PAL_TerminateEx(dwExitCode); return dwExitCode; }
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/vm/amd64/unixstubs.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "common.h" extern "C" { void RedirectForThrowControl() { PORTABILITY_ASSERT("Implement for PAL"); } void __cpuid(int cpuInfo[4], int function_id) { // Based on the Clang implementation provided in cpuid.h: // https://github.com/llvm/llvm-project/blob/master/clang/lib/Headers/cpuid.h __asm(" cpuid\n" \ : "=a"(cpuInfo[0]), "=b"(cpuInfo[1]), "=c"(cpuInfo[2]), "=d"(cpuInfo[3]) \ : "0"(function_id) ); } void __cpuidex(int cpuInfo[4], int function_id, int subFunction_id) { // Based on the Clang implementation provided in cpuid.h: // https://github.com/llvm/llvm-project/blob/master/clang/lib/Headers/cpuid.h __asm(" cpuid\n" \ : "=a"(cpuInfo[0]), "=b"(cpuInfo[1]), "=c"(cpuInfo[2]), "=d"(cpuInfo[3]) \ : "0"(function_id), "2"(subFunction_id) ); } DWORD xmmYmmStateSupport() { DWORD eax; __asm(" xgetbv\n" \ : "=a"(eax) /*output in eax*/\ : "c"(0) /*inputs - 0 in ecx*/\ : "edx" /* registers that are clobbered*/ ); // check OS has enabled both XMM and YMM state support return ((eax & 0x06) == 0x06) ? 1 : 0; } void STDMETHODCALLTYPE JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle) { } };
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "common.h" extern "C" { void RedirectForThrowControl() { PORTABILITY_ASSERT("Implement for PAL"); } void __cpuid(int cpuInfo[4], int function_id) { // Based on the Clang implementation provided in cpuid.h: // https://github.com/llvm/llvm-project/blob/master/clang/lib/Headers/cpuid.h __asm(" cpuid\n" \ : "=a"(cpuInfo[0]), "=b"(cpuInfo[1]), "=c"(cpuInfo[2]), "=d"(cpuInfo[3]) \ : "0"(function_id) ); } void __cpuidex(int cpuInfo[4], int function_id, int subFunction_id) { // Based on the Clang implementation provided in cpuid.h: // https://github.com/llvm/llvm-project/blob/master/clang/lib/Headers/cpuid.h __asm(" cpuid\n" \ : "=a"(cpuInfo[0]), "=b"(cpuInfo[1]), "=c"(cpuInfo[2]), "=d"(cpuInfo[3]) \ : "0"(function_id), "2"(subFunction_id) ); } DWORD xmmYmmStateSupport() { DWORD eax; __asm(" xgetbv\n" \ : "=a"(eax) /*output in eax*/\ : "c"(0) /*inputs - 0 in ecx*/\ : "edx" /* registers that are clobbered*/ ); // check OS has enabled both XMM and YMM state support return ((eax & 0x06) == 0x06) ? 1 : 0; } void STDMETHODCALLTYPE JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle) { } };
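A typical consumer combines the two helpers above: __cpuid to ask the CPU about AVX and OSXSAVE, and xmmYmmStateSupport to confirm the OS actually saves XMM/YMM state. The sketch below is illustrative; the CPUID leaf-1 ECX bit positions (27 = OSXSAVE, 28 = AVX) are standard x86 facts rather than anything defined in this file.

static bool CanUseAvx()
{
    int cpuInfo[4];
    __cpuid(cpuInfo, 1);

    const bool osxsave = (cpuInfo[2] & (1 << 27)) != 0; // CPUID.1:ECX bit 27
    const bool avx     = (cpuInfo[2] & (1 << 28)) != 0; // CPUID.1:ECX bit 28

    // xmmYmmStateSupport() confirms XCR0 reports both XMM and YMM state enabled.
    return osxsave && avx && (xmmYmmStateSupport() == 1);
}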
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/mono/mono/metadata/handle.h
/** * \file * Handle to object in native code * * Authors: * - Ludovic Henry <[email protected]> * - Aleksey Klieger <[email protected]> * - Rodrigo Kumpera <[email protected]> * * Copyright 2016 Dot net foundation. * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_HANDLE_H__ #define __MONO_HANDLE_H__ #include <config.h> #include <glib.h> #include <mono/metadata/handle-decl.h> #include <mono/metadata/object.h> #include <mono/metadata/class.h> #include <mono/utils/mono-error-internals.h> #include <mono/utils/mono-threads.h> #include <mono/utils/checked-build.h> #include <mono/metadata/class-internals.h> /* Handle stack. The handle stack is designed so it's efficient to pop a large amount of entries at once. The stack is made out of a series of fixed size segments. To do bulk operations you use a stack mark. */ /* 3 is the number of fields besides the data in the struct; 128 words makes each chunk 512 or 1024 bytes each */ #define OBJECTS_PER_HANDLES_CHUNK (128 - 3) /* Whether this config needs stack watermark recording to know where to start scanning from. */ #ifdef HOST_WATCHOS #define MONO_NEEDS_STACK_WATERMARK 1 #endif typedef struct _HandleChunk HandleChunk; /* * Define MONO_HANDLE_TRACK_OWNER to store the file and line number of each call to MONO_HANDLE_NEW * in the handle stack. (This doubles the amount of memory used for handles, so it's only useful for debugging). */ /*#define MONO_HANDLE_TRACK_OWNER*/ /* * Define MONO_HANDLE_TRACK_SP to record the C stack pointer at the time of each HANDLE_FUNCTION_ENTER and * to ensure that when a new handle is allocated the previous newest handle is not lower in the stack. * This is useful to catch missing HANDLE_FUNCTION_ENTER / HANDLE_FUNCTION_RETURN pairs which could cause * handle leaks. */ /*#define MONO_HANDLE_TRACK_SP*/ typedef struct { gpointer o; /* MonoObject ptr */ #ifdef MONO_HANDLE_TRACK_OWNER const char *owner; gpointer backtrace_ips[7]; /* result of backtrace () at time of allocation */ #endif #ifdef MONO_HANDLE_TRACK_SP gpointer alloc_sp; /* sp from HandleStack:stackmark_sp at time of allocation */ #endif } HandleChunkElem; struct _HandleChunk { int size; //number of handles HandleChunk *prev, *next; HandleChunkElem elems [OBJECTS_PER_HANDLES_CHUNK]; }; typedef struct MonoHandleStack { HandleChunk *top; //alloc from here HandleChunk *bottom; //scan from here #ifdef MONO_HANDLE_TRACK_SP gpointer stackmark_sp; // C stack pointer top when from most recent mono_stack_mark_init #endif } HandleStack; // Keep this in sync with RuntimeStructs.cs typedef struct { int size; HandleChunk *chunk; #ifdef MONO_HANDLE_TRACK_SP gpointer prev_sp; // C stack pointer from prior mono_stack_mark_init #endif } HandleStackMark; // There are two types of handles. // Pointers to volatile pointers in managed frames. // These are allocated by icall wrappers in marshal-ilgen.c. // Pointers to non-volatile pointers in TLS. // These are allocated by MONO_HANDLE_NEW. typedef void volatile * MonoRawHandle; typedef void (*GcScanFunc) (gpointer*, gpointer); /* If Centrinel is analyzing Mono, use the SUPPRESS macros to mark the bodies * of the handle macros as allowed to perform operations on raw pointers to * managed objects. Take care to UNSUPPRESS the _arguments_ to the macros - we * want warnings if the argument uses pointers unsafely. 
*/ #ifdef __CENTRINEL__ #define MONO_HANDLE_SUPPRESS_SCOPE(b) __CENTRINEL_SUPPRESS_SCOPE(b) #define MONO_HANDLE_SUPPRESS(expr) __CENTRINEL_SUPPRESS(expr) #define MONO_HANDLE_UNSUPPRESS(expr) __CENTRINEL_UNSUPPRESS(expr) #else #define MONO_HANDLE_SUPPRESS_SCOPE(b) ; #define MONO_HANDLE_SUPPRESS(expr) (expr) #define MONO_HANDLE_UNSUPPRESS(expr) (expr) #endif #ifndef MONO_HANDLE_TRACK_OWNER MONO_COMPONENT_API MonoRawHandle mono_handle_new (MonoObject *object, MonoThreadInfo *info); #else MONO_COMPONENT_API MonoRawHandle mono_handle_new (MonoObject *object, MonoThreadInfo *info, const char* owner); #endif void mono_handle_stack_scan (HandleStack *stack, GcScanFunc func, gpointer gc_data, gboolean precise, gboolean check); gboolean mono_handle_stack_is_empty (HandleStack *stack); HandleStack* mono_handle_stack_alloc (void); void mono_handle_stack_free (HandleStack *handlestack); MonoRawHandle mono_stack_mark_pop_value (MonoThreadInfo *info, HandleStackMark *stackmark, MonoRawHandle value); MONO_COMPONENT_API MonoThreadInfo* mono_stack_mark_record_size (MonoThreadInfo *info, HandleStackMark *stackmark, const char *func_name); void mono_handle_stack_free_domain (HandleStack *stack, MonoDomain *domain); #ifdef MONO_HANDLE_TRACK_SP void mono_handle_chunk_leak_check (HandleStack *handles); #endif static inline void mono_stack_mark_init (MonoThreadInfo *info, HandleStackMark *stackmark) { #ifdef MONO_HANDLE_TRACK_SP gpointer sptop = &stackmark; #endif HandleStack *handles = info->handle_stack; stackmark->size = handles->top->size; stackmark->chunk = handles->top; #ifdef MONO_HANDLE_TRACK_SP stackmark->prev_sp = handles->stackmark_sp; handles->stackmark_sp = sptop; #endif } static inline void mono_stack_mark_pop (MonoThreadInfo *info, HandleStackMark *stackmark) { HandleStack *handles = info->handle_stack; HandleChunk *old_top = stackmark->chunk; old_top->size = stackmark->size; mono_memory_write_barrier (); handles->top = old_top; #ifdef MONO_HANDLE_TRACK_SP mono_memory_write_barrier (); /* write to top before prev_sp */ handles->stackmark_sp = stackmark->prev_sp; #endif } // There are deliberately locals and a constant NULL global with this same name. extern MonoThreadInfo * const mono_thread_info_current_var; /* Icall macros */ #define SETUP_ICALL_COMMON \ do { \ MONO_DISABLE_WARNING(4459) /* declaration of 'identifier' hides global declaration */ \ ERROR_DECL (error); \ /* There are deliberately locals and a constant NULL global with this same name. */ \ MonoThreadInfo *mono_thread_info_current_var = mono_thread_info_current (); \ MONO_RESTORE_WARNING \ #define CLEAR_ICALL_COMMON \ mono_error_set_pending_exception (error); // FIXME There should be fast and slow versions of this, i.e. with and without local variable. #define SETUP_ICALL_FRAME \ HandleStackMark __mark; \ mono_stack_mark_init (mono_thread_info_current_var ? mono_thread_info_current_var : mono_thread_info_current (), &__mark); #ifdef ENABLE_CHECKED_BUILD /* __FUNCTION__ creates a C string for every icall */ // FIXME This should be one function call since it is not fully inlined. #define CLEAR_ICALL_FRAME \ mono_stack_mark_pop (mono_stack_mark_record_size (mono_thread_info_current_var, &__mark, __FUNCTION__), &__mark); // FIXME This should be one function call since it is not fully inlined. 
#define CLEAR_ICALL_FRAME_VALUE(RESULT, HANDLE) \ (RESULT) = g_cast (mono_stack_mark_pop_value (mono_stack_mark_record_size (mono_thread_info_current_var, &__mark, __FUNCTION__), &__mark, (HANDLE))); #else #define CLEAR_ICALL_FRAME \ mono_stack_mark_pop (mono_thread_info_current_var ? mono_thread_info_current_var : mono_thread_info_current (), &__mark); #define CLEAR_ICALL_FRAME_VALUE(RESULT, HANDLE) \ (RESULT) = g_cast (mono_stack_mark_pop_value (mono_thread_info_current_var ? mono_thread_info_current_var : mono_thread_info_current (), &__mark, (HANDLE))); #endif #define HANDLE_FUNCTION_ENTER() do { \ MONO_DISABLE_WARNING(4459) /* declaration of 'identifier' hides global declaration */ \ /* There are deliberately locals and a constant NULL global with this same name. */ \ MonoThreadInfo *mono_thread_info_current_var = mono_thread_info_current (); \ MONO_RESTORE_WARNING \ SETUP_ICALL_FRAME \ #define HANDLE_FUNCTION_RETURN() \ CLEAR_ICALL_FRAME; \ } while (0) // Do not do this often, but icall state can be manually managed. // // SETUP_ICALL_FUNCTION // loop { // Does not have to be a loop. // SETUP_ICALL_FRAME // .. // CLEAR_ICALL_FRAME // } // // As with HANDLE_FUNCTION_RETURN, you must not // skip CLEAR_ICALL_FRAME -- no break, continue, return, or goto (goto label at CLEAR_ICALL_FRAME is idiom). // #define SETUP_ICALL_FUNCTION \ MONO_DISABLE_WARNING(4459) /* declaration of 'identifier' hides global declaration */ \ /* There are deliberately locals and a constant NULL global with this same name. */ \ MonoThreadInfo *mono_thread_info_current_var = mono_thread_info_current () \ MONO_RESTORE_WARNING // A common use of manual icall frame management is for loop. // It can also be used for conditionality, where only some paths // through a function allocate handles and frame teardown does // coincide with function return. For example: emit_invoke_call. // #define HANDLE_LOOP_PREPARE SETUP_ICALL_FUNCTION // Return a non-pointer or non-managed pointer, e.g. gboolean. // VAL should be a local variable or at least not use handles in the current frame. // i.e. it is "val", not "expr". #define HANDLE_FUNCTION_RETURN_VAL(VAL) \ CLEAR_ICALL_FRAME; \ return (VAL); \ } while (0) // Return a raw pointer from coop handle. #define HANDLE_FUNCTION_RETURN_OBJ(HANDLE) \ do { \ void* __result = MONO_HANDLE_RAW (HANDLE); \ CLEAR_ICALL_FRAME; \ return g_cast (__result); \ } while (0); } while (0); // Return a coop handle from coop handle. 
#define HANDLE_FUNCTION_RETURN_REF(TYPE, HANDLE) \ do { \ MonoObjectHandle __result; \ CLEAR_ICALL_FRAME_VALUE (__result.__raw, (HANDLE).__raw); \ return MONO_HANDLE_CAST (TYPE, __result); \ } while (0); } while (0); #ifdef MONO_NEEDS_STACK_WATERMARK static void mono_thread_info_pop_stack_mark (MonoThreadInfo *info, void *old_mark) { info->stack_mark = old_mark; } static void* mono_thread_info_push_stack_mark (MonoThreadInfo *info, void *mark) { void *old = info->stack_mark; info->stack_mark = mark; return old; } #define SETUP_STACK_WATERMARK \ int __dummy; \ __builtin_unwind_init (); \ void *__old_stack_mark = mono_thread_info_push_stack_mark (mono_thread_info_current_var, &__dummy); #define CLEAR_STACK_WATERMARK \ mono_thread_info_pop_stack_mark (mono_thread_info_current_var, __old_stack_mark); #else #define SETUP_STACK_WATERMARK #define CLEAR_STACK_WATERMARK #endif #define ICALL_ENTRY() \ SETUP_ICALL_COMMON \ SETUP_ICALL_FRAME \ SETUP_STACK_WATERMARK #define ICALL_RETURN() \ do { \ CLEAR_STACK_WATERMARK \ CLEAR_ICALL_COMMON \ CLEAR_ICALL_FRAME \ return; \ } while (0); } while (0) #define ICALL_RETURN_VAL(VAL) \ do { \ CLEAR_STACK_WATERMARK \ CLEAR_ICALL_COMMON \ CLEAR_ICALL_FRAME \ return VAL; \ } while (0); } while (0) #define ICALL_RETURN_OBJ(HANDLE) \ do { \ CLEAR_STACK_WATERMARK \ CLEAR_ICALL_COMMON \ void* __ret = MONO_HANDLE_RAW (HANDLE); \ CLEAR_ICALL_FRAME \ return g_cast (__ret); \ } while (0); } while (0) /* Handle macros/functions */ #ifdef MONO_HANDLE_TRACK_OWNER #define STRINGIFY_(x) #x #define STRINGIFY(x) STRINGIFY_(x) #define HANDLE_OWNER (__FILE__ ":" STRINGIFY (__LINE__)) #endif //XXX add functions to get/set raw, set field, set field to null, set array, set array to null #define MONO_HANDLE_DCL(TYPE, NAME) TYPED_HANDLE_NAME(TYPE) NAME = MONO_HANDLE_NEW (TYPE, (NAME ## _raw)) // With Visual C++ compiling as C, the type of a ternary expression // yielding two unrelated non-void pointers is the type of the first, plus a warning. // This can be used to simulate gcc typeof extension. // Otherwise we are forced to evaluate twice, or use C++. #ifdef _MSC_VER typedef struct _MonoTypeofCastHelper *MonoTypeofCastHelper; // a pointer type unrelated to anything else #define MONO_TYPEOF_CAST(typeexpr, expr) __pragma(warning(suppress:4133))(0 ? (typeexpr) : (MonoTypeofCastHelper)(expr)) #else #define MONO_TYPEOF_CAST(typeexpr, expr) ((__typeof__ (typeexpr))(expr)) #endif /* * Create handle for the object OBJECT. * The handle will keep the object alive and pinned. */ #ifndef MONO_HANDLE_TRACK_OWNER #define MONO_HANDLE_NEW(type, object) \ (MONO_HANDLE_CAST_FOR (type) (mono_handle_new (MONO_HANDLE_TYPECHECK_FOR (type) (object), mono_thread_info_current_var))) #else #define MONO_HANDLE_NEW(type, object) \ (MONO_HANDLE_CAST_FOR (type) (mono_handle_new (MONO_HANDLE_TYPECHECK_FOR (type) (object), mono_thread_info_current_var, HANDLE_OWNER))) #endif #define MONO_HANDLE_CAST(type, value) (MONO_HANDLE_CAST_FOR (type) ((value).__raw)) /* * Return the raw object reference stored in the handle. * The objref is valid while the handle is alive and * points to it. 
*/ #ifdef __cplusplus #define MONO_HANDLE_RAW(handle) ((handle).GetRaw()) #else #define MONO_HANDLE_RAW(handle) (MONO_TYPEOF_CAST (*(handle).__raw, mono_handle_raw ((handle).__raw))) #endif #define MONO_HANDLE_IS_NULL(handle) (mono_handle_is_null ((handle).__raw)) #define MONO_BOOL(x) (!!MONO_HANDLE_SUPPRESS (x)) #define MONO_HANDLE_BOOL(handle) (MONO_BOOL (!MONO_HANDLE_IS_NULL (handle))) /* WARNING WARNING WARNING The following functions require a particular evaluation ordering to ensure correctness. We must not have exposed handles while any sort of evaluation is happening as that very evaluation might trigger a safepoint and break us. This is why we evaluate index and value before any call to MONO_HANDLE_RAW or other functions that deal with naked objects. */ #define MONO_HANDLE_SETRAW(HANDLE, FIELD, VALUE) do { \ MONO_HANDLE_SUPPRESS_SCOPE(1); \ MonoObject *__val = MONO_HANDLE_SUPPRESS ((MonoObject*)(MONO_HANDLE_UNSUPPRESS (VALUE))); \ MONO_OBJECT_SETREF_INTERNAL (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE)), FIELD, __val); \ } while (0) // handle->field = value for managed pointer #define MONO_HANDLE_SET(HANDLE, FIELD, VALUE) do { \ MonoObjectHandle __val = MONO_HANDLE_CAST (MonoObject, VALUE); \ do { \ MONO_HANDLE_SUPPRESS_SCOPE(1); \ MONO_OBJECT_SETREF_INTERNAL (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE)), FIELD, MONO_HANDLE_RAW (__val)); \ } while (0); \ } while (0) // resultHandle = handle->field /* N.B. RESULT is evaluated before HANDLE */ #define MONO_HANDLE_GET(RESULT, HANDLE, FIELD) do { \ MonoObjectHandle __dest = MONO_HANDLE_CAST (MonoObject, RESULT); \ MONO_HANDLE_SUPPRESS (*(gpointer*)__dest.__raw = (gpointer)MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE))->FIELD); \ } while (0) // Get handle->field as a type-handle. #define MONO_HANDLE_NEW_GET(TYPE,HANDLE,FIELD) (MONO_HANDLE_NEW(TYPE,MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE))->FIELD))) // Get handle->field, where field is not a pointer (an integer or non-managed pointer). #define MONO_HANDLE_GETVAL(HANDLE, FIELD) MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE))->FIELD) // Get handle->field as a boolean, i.e. typically compare managed pointer to NULL, // though any type is ok. #define MONO_HANDLE_GET_BOOL(handle, field) (MONO_BOOL (MONO_HANDLE_GETVAL (handle, field))) // handle->field = (type)value, for non-managed pointers // This would be easier to write with the gcc extension typeof, // but it is not widely enough implemented (i.e. Microsoft C). // The value copy is needed in cases computing value causes a GC #define MONO_HANDLE_SETVAL(HANDLE, FIELD, TYPE, VALUE) do { \ TYPE __val = (VALUE); \ if (0) { TYPE * typecheck G_GNUC_UNUSED = &MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (HANDLE)->FIELD); } \ MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE))->FIELD = __val); \ } while (0) // handle [idx] = value (for managed pointers) #define MONO_HANDLE_ARRAY_SETREF(HANDLE, IDX, VALUE) do { \ uintptr_t __idx = (IDX); \ MonoObjectHandle __val = MONO_HANDLE_CAST (MonoObject, VALUE); \ { /* FIXME scope needed by Centrinel */ \ /* FIXME mono_array_setref_fast is not an expression. 
*/ \ MONO_HANDLE_SUPPRESS_SCOPE(1); \ mono_array_setref_fast (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE)), __idx, MONO_HANDLE_RAW (__val)); \ } \ } while (0) // handle [idx] = (type)value (for non-managed types) #define MONO_HANDLE_ARRAY_SETVAL(HANDLE, TYPE, IDX, VALUE) do { \ uintptr_t __idx = (IDX); \ TYPE __val = (VALUE); \ { /* FIXME scope needed by Centrinel */ \ /* FIXME mono_array_set is not an expression. */ \ MONO_HANDLE_SUPPRESS_SCOPE(1); \ mono_array_set_internal (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE)), TYPE, __idx, __val); \ } \ } while (0) #if 0 // This is never used. // handle [idx] = value #define MONO_HANDLE_ARRAY_SETRAW(HANDLE, IDX, VALUE) do { \ MONO_HANDLE_SUPPRESS_SCOPE(1); \ uintptr_t __idx = MONO_HANDLE_UNSUPPRESS(IDX); \ MonoObject *__val = (MonoObject*)(VALUE); \ mono_array_setref_fast (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE)), __idx, __val); \ } while (0) #endif /* N.B. DEST is evaluated AFTER all the other arguments */ #define MONO_HANDLE_ARRAY_GETVAL(DEST, HANDLE, TYPE, IDX) do { \ MonoArrayHandle __arr = (HANDLE); \ uintptr_t __idx = (IDX); \ TYPE __result = MONO_HANDLE_SUPPRESS (mono_array_get_internal (MONO_HANDLE_RAW(__arr), TYPE, __idx)); \ (DEST) = __result; \ } while (0) // dest = handle [idx] (for managed pointers) #define MONO_HANDLE_ARRAY_GETREF(DEST, HANDLE, IDX) do { \ mono_handle_array_getref (MONO_HANDLE_CAST(MonoObject, (DEST)), (HANDLE), (IDX)); \ } while (0) #define MONO_HANDLE_ASSIGN_RAW(DESTH, SRCP) (mono_handle_assign_raw (MONO_HANDLE_CAST (MonoObject, (DESTH)), (SRCP))) #define MONO_HANDLE_ASSIGN(DESTH, SRCH) (MONO_HANDLE_ASSIGN_RAW ((DESTH), MONO_HANDLE_RAW (SRCH))) #define MONO_HANDLE_DOMAIN(HANDLE) MONO_HANDLE_SUPPRESS (mono_object_domain (MONO_HANDLE_RAW (MONO_HANDLE_CAST (MonoObject, MONO_HANDLE_UNSUPPRESS (HANDLE))))) #define mono_handle_domain(handle) MONO_HANDLE_DOMAIN ((handle)) /* Given an object and a MonoClassField, return the value (must be non-object) * of the field. It's the caller's responsibility to check that the object is * of the correct class. 
*/ #define MONO_HANDLE_GET_FIELD_VAL(HANDLE,TYPE,FIELD) (*(TYPE *)(mono_handle_unsafe_field_addr (MONO_HANDLE_CAST (MonoObject, (HANDLE)), (FIELD)))) #define MONO_HANDLE_GET_FIELD_BOOL(handle, type, field) (MONO_BOOL (MONO_HANDLE_GET_FIELD_VAL ((handle), type, (field)))) #define MONO_HANDLE_NEW_GET_FIELD(HANDLE,TYPE,FIELD) MONO_HANDLE_NEW (TYPE, MONO_HANDLE_SUPPRESS (*(TYPE**)(mono_handle_unsafe_field_addr (MONO_HANDLE_CAST (MonoObject, MONO_HANDLE_UNSUPPRESS (HANDLE)), (FIELD))))) #define MONO_HANDLE_SET_FIELD_VAL(HANDLE,TYPE,FIELD,VAL) do { \ MonoObjectHandle __obj = (HANDLE); \ MonoClassField *__field = (FIELD); \ TYPE __value = (VAL); \ *(TYPE*)(mono_handle_unsafe_field_addr (__obj, __field)) = __value; \ } while (0) #define MONO_HANDLE_SET_FIELD_REF(HANDLE,FIELD,VALH) do { \ MonoObjectHandle __obj = MONO_HANDLE_CAST (MonoObject, (HANDLE)); \ MonoClassField *__field = (FIELD); \ MonoObjectHandle __value = MONO_HANDLE_CAST (MonoObject, (VALH)); \ MONO_HANDLE_SUPPRESS (mono_gc_wbarrier_generic_store_internal (mono_handle_unsafe_field_addr (__obj, __field), MONO_HANDLE_RAW (__value))); \ } while (0) #define MONO_HANDLE_GET_CLASS(handle) (MONO_HANDLE_GETVAL (MONO_HANDLE_CAST (MonoObject, (handle)), vtable)->klass) /* Baked typed handles we all want */ TYPED_HANDLE_DECL (MonoString); TYPED_HANDLE_DECL (MonoArray); TYPED_HANDLE_DECL (MonoObject); TYPED_HANDLE_DECL (MonoException); TYPED_HANDLE_DECL (MonoAppContext); /* Simpler version of MONO_HANDLE_NEW if the handle is not used */ #define MONO_HANDLE_PIN(object) do { \ if ((object) != NULL) \ MONO_HANDLE_NEW (MonoObject, (MonoObject*)(object)); \ } while (0) // Structs cannot be cast to structs. // As well, a function is needed because an anonymous struct cannot be initialized in C. static inline MonoObjectHandle mono_handle_cast (gpointer a) { return *(MonoObjectHandle*)&a; } static inline MONO_ALWAYS_INLINE gboolean mono_handle_is_null (MonoRawHandle raw_handle) { MONO_HANDLE_SUPPRESS_SCOPE (1); MonoObjectHandle *handle = (MonoObjectHandle*)&raw_handle; return !handle->__raw || !*handle->__raw; } static inline MONO_ALWAYS_INLINE gpointer mono_handle_raw (MonoRawHandle raw_handle) { MONO_HANDLE_SUPPRESS_SCOPE (1); MonoObjectHandle *handle = (MonoObjectHandle*)&raw_handle; return handle->__raw ? *handle->__raw : NULL; } /* Unfortunately MonoThreadHandle is already a typedef used for something unrelated. So * the coop handle for MonoThread* is MonoThreadObjectHandle. */ typedef MonoThread MonoThreadObject; TYPED_HANDLE_DECL (MonoThreadObject); /* This is the constant for a handle that points nowhere. Constant handles may be initialized to it, but non-constant handles must be NEW'ed. Uses of these are suspicious and should be reviewed and probably changed FIXME. */ #define NULL_HANDLE (mono_null_value_handle ()) #define NULL_HANDLE_INIT { 0 } static inline MonoObjectHandle mono_null_value_handle (void) { MonoObjectHandle result = NULL_HANDLE_INIT; return result; } #define NULL_HANDLE_STRING (MONO_HANDLE_CAST (MonoString, NULL_HANDLE)) #define NULL_HANDLE_ARRAY (MONO_HANDLE_CAST (MonoArray, NULL_HANDLE)) #define NULL_HANDLE_STRING_BUILDER (MONO_HANDLE_CAST (MonoStringBuilder, NULL_HANDLE)) #if __cplusplus // Use this to convert a THandle to a raw T** such as for a ref or out parameter, without // copying back and forth through an intermediate. The handle must already be allocated, // such as icall marshaling does for out and ref parameters. 
#define MONO_HANDLE_REF(h) (h.Ref ()) #else static inline void volatile* mono_handle_ref (void volatile* p) { g_assert (p); return p; } // Use this to convert a THandle to a raw T** such as for a ref or out parameter, without // copying back and forth through an intermediate. The handle must already be allocated, // such as icall marshaling does for out and ref parameters. #define MONO_HANDLE_REF(handle) (MONO_TYPEOF_CAST ((handle).__raw, mono_handle_ref ((handle).__raw))) #endif static inline MonoObjectHandle mono_handle_assign_raw (MonoObjectHandleOut dest, void *src) { g_assert (dest.__raw); MONO_HANDLE_SUPPRESS (*dest.__raw = (MonoObject*)src); return dest; } /* It is unsafe to call this function directly - it does not pin the handle! Use MONO_HANDLE_GET_FIELD_VAL(). */ static inline gpointer mono_handle_unsafe_field_addr (MonoObjectHandle h, MonoClassField *field) { return MONO_HANDLE_SUPPRESS (((gchar *)MONO_HANDLE_RAW (h)) + field->offset); } /* Matches ObjectHandleOnStack in managed code */ typedef MonoObject **MonoObjectHandleOnStack; #define HANDLE_ON_STACK_SET(handle, obj) do { \ *(handle) = (MonoObject*)obj; \ } while (0) //FIXME this should go somewhere else MonoStringHandle mono_string_new_handle (const char *data, MonoError *error); MonoArrayHandle mono_array_new_handle (MonoClass *eclass, uintptr_t n, MonoError *error); MonoArrayHandle mono_array_new_full_handle (MonoClass *array_class, uintptr_t *lengths, intptr_t *lower_bounds, MonoError *error); #define mono_array_handle_setref(array,index,value) MONO_HANDLE_ARRAY_SETREF ((array), (index), (value)) void mono_handle_array_getref (MonoObjectHandleOut dest, MonoArrayHandle array, uintptr_t index); #define mono_handle_class(o) MONO_HANDLE_SUPPRESS (mono_object_class (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (o)))) #define mono_handle_vtable(o) MONO_HANDLE_GETVAL (o, vtable) /* Local handles to global GC handles and back */ MonoGCHandle mono_gchandle_from_handle (MonoObjectHandle handle, mono_bool pinned); MonoObjectHandle mono_gchandle_get_target_handle (MonoGCHandle gchandle); gboolean mono_gchandle_target_equal (MonoGCHandle gchandle, MonoObjectHandle equal); void mono_gchandle_target_is_null_or_equal (MonoGCHandle gchandle, MonoObjectHandle equal, gboolean *is_null, gboolean *is_equal); void mono_gchandle_set_target_handle (MonoGCHandle gchandle, MonoObjectHandle obj); void mono_array_handle_memcpy_refs (MonoArrayHandle dest, uintptr_t dest_idx, MonoArrayHandle src, uintptr_t src_idx, uintptr_t len); /* Pins the MonoArray using a gchandle and returns a pointer to the * element with the given index (where each element is of the given * size. Call mono_gchandle_free to unpin. 
*/ gpointer mono_array_handle_pin_with_size (MonoArrayHandle handle, int size, uintptr_t index, MonoGCHandle *gchandle); // Returns a pointer to the element with the given index, but does not pin gpointer mono_array_handle_addr (MonoArrayHandle handle, int size, uintptr_t index); #define MONO_ARRAY_HANDLE_PIN(handle,type,index,gchandle_out) ((type*)mono_array_handle_pin_with_size (MONO_HANDLE_CAST(MonoArray,(handle)), sizeof (type), (index), (gchandle_out))) void mono_value_copy_array_handle (MonoArrayHandle dest, int dest_idx, gconstpointer src, int count); gunichar2 * mono_string_handle_pin_chars (MonoStringHandle s, MonoGCHandle *gchandle_out); gpointer mono_object_handle_pin_unbox (MonoObjectHandle boxed_valuetype_obj, MonoGCHandle *gchandle_out); static inline gpointer mono_handle_unbox_unsafe (MonoObjectHandle handle) { g_assert (m_class_is_valuetype (MONO_HANDLE_GETVAL (handle, vtable)->klass)); return MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (handle) + 1); } void mono_error_set_exception_handle (MonoError *error, MonoExceptionHandle exc); MonoGCHandle mono_gchandle_new_weakref_from_handle (MonoObjectHandle handle); int mono_handle_hash (MonoObjectHandle object); MonoGCHandle mono_gchandle_new_weakref_from_handle_track_resurrection (MonoObjectHandle handle); #endif /* __MONO_HANDLE_H__ */
/** * \file * Handle to object in native code * * Authors: * - Ludovic Henry <[email protected]> * - Aleksey Klieger <[email protected]> * - Rodrigo Kumpera <[email protected]> * * Copyright 2016 Dot net foundation. * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_HANDLE_H__ #define __MONO_HANDLE_H__ #include <config.h> #include <glib.h> #include <mono/metadata/handle-decl.h> #include <mono/metadata/object.h> #include <mono/metadata/class.h> #include <mono/utils/mono-error-internals.h> #include <mono/utils/mono-threads.h> #include <mono/utils/checked-build.h> #include <mono/metadata/class-internals.h> /* Handle stack. The handle stack is designed so it's efficient to pop a large amount of entries at once. The stack is made out of a series of fixed size segments. To do bulk operations you use a stack mark. */ /* 3 is the number of fields besides the data in the struct; 128 words makes each chunk 512 or 1024 bytes each */ #define OBJECTS_PER_HANDLES_CHUNK (128 - 3) /* Whether this config needs stack watermark recording to know where to start scanning from. */ #ifdef HOST_WATCHOS #define MONO_NEEDS_STACK_WATERMARK 1 #endif typedef struct _HandleChunk HandleChunk; /* * Define MONO_HANDLE_TRACK_OWNER to store the file and line number of each call to MONO_HANDLE_NEW * in the handle stack. (This doubles the amount of memory used for handles, so it's only useful for debugging). */ /*#define MONO_HANDLE_TRACK_OWNER*/ /* * Define MONO_HANDLE_TRACK_SP to record the C stack pointer at the time of each HANDLE_FUNCTION_ENTER and * to ensure that when a new handle is allocated the previous newest handle is not lower in the stack. * This is useful to catch missing HANDLE_FUNCTION_ENTER / HANDLE_FUNCTION_RETURN pairs which could cause * handle leaks. */ /*#define MONO_HANDLE_TRACK_SP*/ typedef struct { gpointer o; /* MonoObject ptr */ #ifdef MONO_HANDLE_TRACK_OWNER const char *owner; gpointer backtrace_ips[7]; /* result of backtrace () at time of allocation */ #endif #ifdef MONO_HANDLE_TRACK_SP gpointer alloc_sp; /* sp from HandleStack:stackmark_sp at time of allocation */ #endif } HandleChunkElem; struct _HandleChunk { int size; //number of handles HandleChunk *prev, *next; HandleChunkElem elems [OBJECTS_PER_HANDLES_CHUNK]; }; typedef struct MonoHandleStack { HandleChunk *top; //alloc from here HandleChunk *bottom; //scan from here #ifdef MONO_HANDLE_TRACK_SP gpointer stackmark_sp; // C stack pointer top when from most recent mono_stack_mark_init #endif } HandleStack; // Keep this in sync with RuntimeStructs.cs typedef struct { int size; HandleChunk *chunk; #ifdef MONO_HANDLE_TRACK_SP gpointer prev_sp; // C stack pointer from prior mono_stack_mark_init #endif } HandleStackMark; // There are two types of handles. // Pointers to volatile pointers in managed frames. // These are allocated by icall wrappers in marshal-ilgen.c. // Pointers to non-volatile pointers in TLS. // These are allocated by MONO_HANDLE_NEW. typedef void volatile * MonoRawHandle; typedef void (*GcScanFunc) (gpointer*, gpointer); /* If Centrinel is analyzing Mono, use the SUPPRESS macros to mark the bodies * of the handle macros as allowed to perform operations on raw pointers to * managed objects. Take care to UNSUPPRESS the _arguments_ to the macros - we * want warnings if the argument uses pointers unsafely. 
*/ #ifdef __CENTRINEL__ #define MONO_HANDLE_SUPPRESS_SCOPE(b) __CENTRINEL_SUPPRESS_SCOPE(b) #define MONO_HANDLE_SUPPRESS(expr) __CENTRINEL_SUPPRESS(expr) #define MONO_HANDLE_UNSUPPRESS(expr) __CENTRINEL_UNSUPPRESS(expr) #else #define MONO_HANDLE_SUPPRESS_SCOPE(b) ; #define MONO_HANDLE_SUPPRESS(expr) (expr) #define MONO_HANDLE_UNSUPPRESS(expr) (expr) #endif #ifndef MONO_HANDLE_TRACK_OWNER MONO_COMPONENT_API MonoRawHandle mono_handle_new (MonoObject *object, MonoThreadInfo *info); #else MONO_COMPONENT_API MonoRawHandle mono_handle_new (MonoObject *object, MonoThreadInfo *info, const char* owner); #endif void mono_handle_stack_scan (HandleStack *stack, GcScanFunc func, gpointer gc_data, gboolean precise, gboolean check); gboolean mono_handle_stack_is_empty (HandleStack *stack); HandleStack* mono_handle_stack_alloc (void); void mono_handle_stack_free (HandleStack *handlestack); MonoRawHandle mono_stack_mark_pop_value (MonoThreadInfo *info, HandleStackMark *stackmark, MonoRawHandle value); MONO_COMPONENT_API MonoThreadInfo* mono_stack_mark_record_size (MonoThreadInfo *info, HandleStackMark *stackmark, const char *func_name); void mono_handle_stack_free_domain (HandleStack *stack, MonoDomain *domain); #ifdef MONO_HANDLE_TRACK_SP void mono_handle_chunk_leak_check (HandleStack *handles); #endif static inline void mono_stack_mark_init (MonoThreadInfo *info, HandleStackMark *stackmark) { #ifdef MONO_HANDLE_TRACK_SP gpointer sptop = &stackmark; #endif HandleStack *handles = info->handle_stack; stackmark->size = handles->top->size; stackmark->chunk = handles->top; #ifdef MONO_HANDLE_TRACK_SP stackmark->prev_sp = handles->stackmark_sp; handles->stackmark_sp = sptop; #endif } static inline void mono_stack_mark_pop (MonoThreadInfo *info, HandleStackMark *stackmark) { HandleStack *handles = info->handle_stack; HandleChunk *old_top = stackmark->chunk; old_top->size = stackmark->size; mono_memory_write_barrier (); handles->top = old_top; #ifdef MONO_HANDLE_TRACK_SP mono_memory_write_barrier (); /* write to top before prev_sp */ handles->stackmark_sp = stackmark->prev_sp; #endif } // There are deliberately locals and a constant NULL global with this same name. extern MonoThreadInfo * const mono_thread_info_current_var; /* Icall macros */ #define SETUP_ICALL_COMMON \ do { \ MONO_DISABLE_WARNING(4459) /* declaration of 'identifier' hides global declaration */ \ ERROR_DECL (error); \ /* There are deliberately locals and a constant NULL global with this same name. */ \ MonoThreadInfo *mono_thread_info_current_var = mono_thread_info_current (); \ MONO_RESTORE_WARNING \ #define CLEAR_ICALL_COMMON \ mono_error_set_pending_exception (error); // FIXME There should be fast and slow versions of this, i.e. with and without local variable. #define SETUP_ICALL_FRAME \ HandleStackMark __mark; \ mono_stack_mark_init (mono_thread_info_current_var ? mono_thread_info_current_var : mono_thread_info_current (), &__mark); #ifdef ENABLE_CHECKED_BUILD /* __FUNCTION__ creates a C string for every icall */ // FIXME This should be one function call since it is not fully inlined. #define CLEAR_ICALL_FRAME \ mono_stack_mark_pop (mono_stack_mark_record_size (mono_thread_info_current_var, &__mark, __FUNCTION__), &__mark); // FIXME This should be one function call since it is not fully inlined. 
#define CLEAR_ICALL_FRAME_VALUE(RESULT, HANDLE) \ (RESULT) = g_cast (mono_stack_mark_pop_value (mono_stack_mark_record_size (mono_thread_info_current_var, &__mark, __FUNCTION__), &__mark, (HANDLE))); #else #define CLEAR_ICALL_FRAME \ mono_stack_mark_pop (mono_thread_info_current_var ? mono_thread_info_current_var : mono_thread_info_current (), &__mark); #define CLEAR_ICALL_FRAME_VALUE(RESULT, HANDLE) \ (RESULT) = g_cast (mono_stack_mark_pop_value (mono_thread_info_current_var ? mono_thread_info_current_var : mono_thread_info_current (), &__mark, (HANDLE))); #endif #define HANDLE_FUNCTION_ENTER() do { \ MONO_DISABLE_WARNING(4459) /* declaration of 'identifier' hides global declaration */ \ /* There are deliberately locals and a constant NULL global with this same name. */ \ MonoThreadInfo *mono_thread_info_current_var = mono_thread_info_current (); \ MONO_RESTORE_WARNING \ SETUP_ICALL_FRAME \ #define HANDLE_FUNCTION_RETURN() \ CLEAR_ICALL_FRAME; \ } while (0) // Do not do this often, but icall state can be manually managed. // // SETUP_ICALL_FUNCTION // loop { // Does not have to be a loop. // SETUP_ICALL_FRAME // .. // CLEAR_ICALL_FRAME // } // // As with HANDLE_FUNCTION_RETURN, you must not // skip CLEAR_ICALL_FRAME -- no break, continue, return, or goto (goto label at CLEAR_ICALL_FRAME is idiom). // #define SETUP_ICALL_FUNCTION \ MONO_DISABLE_WARNING(4459) /* declaration of 'identifier' hides global declaration */ \ /* There are deliberately locals and a constant NULL global with this same name. */ \ MonoThreadInfo *mono_thread_info_current_var = mono_thread_info_current () \ MONO_RESTORE_WARNING // A common use of manual icall frame management is for loop. // It can also be used for conditionality, where only some paths // through a function allocate handles and frame teardown does // coincide with function return. For example: emit_invoke_call. // #define HANDLE_LOOP_PREPARE SETUP_ICALL_FUNCTION // Return a non-pointer or non-managed pointer, e.g. gboolean. // VAL should be a local variable or at least not use handles in the current frame. // i.e. it is "val", not "expr". #define HANDLE_FUNCTION_RETURN_VAL(VAL) \ CLEAR_ICALL_FRAME; \ return (VAL); \ } while (0) // Return a raw pointer from coop handle. #define HANDLE_FUNCTION_RETURN_OBJ(HANDLE) \ do { \ void* __result = MONO_HANDLE_RAW (HANDLE); \ CLEAR_ICALL_FRAME; \ return g_cast (__result); \ } while (0); } while (0); // Return a coop handle from coop handle. 
#define HANDLE_FUNCTION_RETURN_REF(TYPE, HANDLE) \ do { \ MonoObjectHandle __result; \ CLEAR_ICALL_FRAME_VALUE (__result.__raw, (HANDLE).__raw); \ return MONO_HANDLE_CAST (TYPE, __result); \ } while (0); } while (0); #ifdef MONO_NEEDS_STACK_WATERMARK static void mono_thread_info_pop_stack_mark (MonoThreadInfo *info, void *old_mark) { info->stack_mark = old_mark; } static void* mono_thread_info_push_stack_mark (MonoThreadInfo *info, void *mark) { void *old = info->stack_mark; info->stack_mark = mark; return old; } #define SETUP_STACK_WATERMARK \ int __dummy; \ __builtin_unwind_init (); \ void *__old_stack_mark = mono_thread_info_push_stack_mark (mono_thread_info_current_var, &__dummy); #define CLEAR_STACK_WATERMARK \ mono_thread_info_pop_stack_mark (mono_thread_info_current_var, __old_stack_mark); #else #define SETUP_STACK_WATERMARK #define CLEAR_STACK_WATERMARK #endif #define ICALL_ENTRY() \ SETUP_ICALL_COMMON \ SETUP_ICALL_FRAME \ SETUP_STACK_WATERMARK #define ICALL_RETURN() \ do { \ CLEAR_STACK_WATERMARK \ CLEAR_ICALL_COMMON \ CLEAR_ICALL_FRAME \ return; \ } while (0); } while (0) #define ICALL_RETURN_VAL(VAL) \ do { \ CLEAR_STACK_WATERMARK \ CLEAR_ICALL_COMMON \ CLEAR_ICALL_FRAME \ return VAL; \ } while (0); } while (0) #define ICALL_RETURN_OBJ(HANDLE) \ do { \ CLEAR_STACK_WATERMARK \ CLEAR_ICALL_COMMON \ void* __ret = MONO_HANDLE_RAW (HANDLE); \ CLEAR_ICALL_FRAME \ return g_cast (__ret); \ } while (0); } while (0) /* Handle macros/functions */ #ifdef MONO_HANDLE_TRACK_OWNER #define STRINGIFY_(x) #x #define STRINGIFY(x) STRINGIFY_(x) #define HANDLE_OWNER (__FILE__ ":" STRINGIFY (__LINE__)) #endif //XXX add functions to get/set raw, set field, set field to null, set array, set array to null #define MONO_HANDLE_DCL(TYPE, NAME) TYPED_HANDLE_NAME(TYPE) NAME = MONO_HANDLE_NEW (TYPE, (NAME ## _raw)) // With Visual C++ compiling as C, the type of a ternary expression // yielding two unrelated non-void pointers is the type of the first, plus a warning. // This can be used to simulate gcc typeof extension. // Otherwise we are forced to evaluate twice, or use C++. #ifdef _MSC_VER typedef struct _MonoTypeofCastHelper *MonoTypeofCastHelper; // a pointer type unrelated to anything else #define MONO_TYPEOF_CAST(typeexpr, expr) __pragma(warning(suppress:4133))(0 ? (typeexpr) : (MonoTypeofCastHelper)(expr)) #else #define MONO_TYPEOF_CAST(typeexpr, expr) ((__typeof__ (typeexpr))(expr)) #endif /* * Create handle for the object OBJECT. * The handle will keep the object alive and pinned. */ #ifndef MONO_HANDLE_TRACK_OWNER #define MONO_HANDLE_NEW(type, object) \ (MONO_HANDLE_CAST_FOR (type) (mono_handle_new (MONO_HANDLE_TYPECHECK_FOR (type) (object), mono_thread_info_current_var))) #else #define MONO_HANDLE_NEW(type, object) \ (MONO_HANDLE_CAST_FOR (type) (mono_handle_new (MONO_HANDLE_TYPECHECK_FOR (type) (object), mono_thread_info_current_var, HANDLE_OWNER))) #endif #define MONO_HANDLE_CAST(type, value) (MONO_HANDLE_CAST_FOR (type) ((value).__raw)) /* * Return the raw object reference stored in the handle. * The objref is valid while the handle is alive and * points to it. 
*/ #ifdef __cplusplus #define MONO_HANDLE_RAW(handle) ((handle).GetRaw()) #else #define MONO_HANDLE_RAW(handle) (MONO_TYPEOF_CAST (*(handle).__raw, mono_handle_raw ((handle).__raw))) #endif #define MONO_HANDLE_IS_NULL(handle) (mono_handle_is_null ((handle).__raw)) #define MONO_BOOL(x) (!!MONO_HANDLE_SUPPRESS (x)) #define MONO_HANDLE_BOOL(handle) (MONO_BOOL (!MONO_HANDLE_IS_NULL (handle))) /* WARNING WARNING WARNING The following functions require a particular evaluation ordering to ensure correctness. We must not have exposed handles while any sort of evaluation is happening as that very evaluation might trigger a safepoint and break us. This is why we evaluate index and value before any call to MONO_HANDLE_RAW or other functions that deal with naked objects. */ #define MONO_HANDLE_SETRAW(HANDLE, FIELD, VALUE) do { \ MONO_HANDLE_SUPPRESS_SCOPE(1); \ MonoObject *__val = MONO_HANDLE_SUPPRESS ((MonoObject*)(MONO_HANDLE_UNSUPPRESS (VALUE))); \ MONO_OBJECT_SETREF_INTERNAL (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE)), FIELD, __val); \ } while (0) // handle->field = value for managed pointer #define MONO_HANDLE_SET(HANDLE, FIELD, VALUE) do { \ MonoObjectHandle __val = MONO_HANDLE_CAST (MonoObject, VALUE); \ do { \ MONO_HANDLE_SUPPRESS_SCOPE(1); \ MONO_OBJECT_SETREF_INTERNAL (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE)), FIELD, MONO_HANDLE_RAW (__val)); \ } while (0); \ } while (0) // resultHandle = handle->field /* N.B. RESULT is evaluated before HANDLE */ #define MONO_HANDLE_GET(RESULT, HANDLE, FIELD) do { \ MonoObjectHandle __dest = MONO_HANDLE_CAST (MonoObject, RESULT); \ MONO_HANDLE_SUPPRESS (*(gpointer*)__dest.__raw = (gpointer)MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE))->FIELD); \ } while (0) // Get handle->field as a type-handle. #define MONO_HANDLE_NEW_GET(TYPE,HANDLE,FIELD) (MONO_HANDLE_NEW(TYPE,MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE))->FIELD))) // Get handle->field, where field is not a pointer (an integer or non-managed pointer). #define MONO_HANDLE_GETVAL(HANDLE, FIELD) MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE))->FIELD) // Get handle->field as a boolean, i.e. typically compare managed pointer to NULL, // though any type is ok. #define MONO_HANDLE_GET_BOOL(handle, field) (MONO_BOOL (MONO_HANDLE_GETVAL (handle, field))) // handle->field = (type)value, for non-managed pointers // This would be easier to write with the gcc extension typeof, // but it is not widely enough implemented (i.e. Microsoft C). // The value copy is needed in cases computing value causes a GC #define MONO_HANDLE_SETVAL(HANDLE, FIELD, TYPE, VALUE) do { \ TYPE __val = (VALUE); \ if (0) { TYPE * typecheck G_GNUC_UNUSED = &MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (HANDLE)->FIELD); } \ MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE))->FIELD = __val); \ } while (0) // handle [idx] = value (for managed pointers) #define MONO_HANDLE_ARRAY_SETREF(HANDLE, IDX, VALUE) do { \ uintptr_t __idx = (IDX); \ MonoObjectHandle __val = MONO_HANDLE_CAST (MonoObject, VALUE); \ { /* FIXME scope needed by Centrinel */ \ /* FIXME mono_array_setref_fast is not an expression. 
*/ \ MONO_HANDLE_SUPPRESS_SCOPE(1); \ mono_array_setref_fast (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE)), __idx, MONO_HANDLE_RAW (__val)); \ } \ } while (0) // handle [idx] = (type)value (for non-managed types) #define MONO_HANDLE_ARRAY_SETVAL(HANDLE, TYPE, IDX, VALUE) do { \ uintptr_t __idx = (IDX); \ TYPE __val = (VALUE); \ { /* FIXME scope needed by Centrinel */ \ /* FIXME mono_array_set is not an expression. */ \ MONO_HANDLE_SUPPRESS_SCOPE(1); \ mono_array_set_internal (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE)), TYPE, __idx, __val); \ } \ } while (0) #if 0 // This is never used. // handle [idx] = value #define MONO_HANDLE_ARRAY_SETRAW(HANDLE, IDX, VALUE) do { \ MONO_HANDLE_SUPPRESS_SCOPE(1); \ uintptr_t __idx = MONO_HANDLE_UNSUPPRESS(IDX); \ MonoObject *__val = (MonoObject*)(VALUE); \ mono_array_setref_fast (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (HANDLE)), __idx, __val); \ } while (0) #endif /* N.B. DEST is evaluated AFTER all the other arguments */ #define MONO_HANDLE_ARRAY_GETVAL(DEST, HANDLE, TYPE, IDX) do { \ MonoArrayHandle __arr = (HANDLE); \ uintptr_t __idx = (IDX); \ TYPE __result = MONO_HANDLE_SUPPRESS (mono_array_get_internal (MONO_HANDLE_RAW(__arr), TYPE, __idx)); \ (DEST) = __result; \ } while (0) // dest = handle [idx] (for managed pointers) #define MONO_HANDLE_ARRAY_GETREF(DEST, HANDLE, IDX) do { \ mono_handle_array_getref (MONO_HANDLE_CAST(MonoObject, (DEST)), (HANDLE), (IDX)); \ } while (0) #define MONO_HANDLE_ASSIGN_RAW(DESTH, SRCP) (mono_handle_assign_raw (MONO_HANDLE_CAST (MonoObject, (DESTH)), (SRCP))) #define MONO_HANDLE_ASSIGN(DESTH, SRCH) (MONO_HANDLE_ASSIGN_RAW ((DESTH), MONO_HANDLE_RAW (SRCH))) #define MONO_HANDLE_DOMAIN(HANDLE) MONO_HANDLE_SUPPRESS (mono_object_domain (MONO_HANDLE_RAW (MONO_HANDLE_CAST (MonoObject, MONO_HANDLE_UNSUPPRESS (HANDLE))))) #define mono_handle_domain(handle) MONO_HANDLE_DOMAIN ((handle)) /* Given an object and a MonoClassField, return the value (must be non-object) * of the field. It's the caller's responsibility to check that the object is * of the correct class. 
*/ #define MONO_HANDLE_GET_FIELD_VAL(HANDLE,TYPE,FIELD) (*(TYPE *)(mono_handle_unsafe_field_addr (MONO_HANDLE_CAST (MonoObject, (HANDLE)), (FIELD)))) #define MONO_HANDLE_GET_FIELD_BOOL(handle, type, field) (MONO_BOOL (MONO_HANDLE_GET_FIELD_VAL ((handle), type, (field)))) #define MONO_HANDLE_NEW_GET_FIELD(HANDLE,TYPE,FIELD) MONO_HANDLE_NEW (TYPE, MONO_HANDLE_SUPPRESS (*(TYPE**)(mono_handle_unsafe_field_addr (MONO_HANDLE_CAST (MonoObject, MONO_HANDLE_UNSUPPRESS (HANDLE)), (FIELD))))) #define MONO_HANDLE_SET_FIELD_VAL(HANDLE,TYPE,FIELD,VAL) do { \ MonoObjectHandle __obj = (HANDLE); \ MonoClassField *__field = (FIELD); \ TYPE __value = (VAL); \ *(TYPE*)(mono_handle_unsafe_field_addr (__obj, __field)) = __value; \ } while (0) #define MONO_HANDLE_SET_FIELD_REF(HANDLE,FIELD,VALH) do { \ MonoObjectHandle __obj = MONO_HANDLE_CAST (MonoObject, (HANDLE)); \ MonoClassField *__field = (FIELD); \ MonoObjectHandle __value = MONO_HANDLE_CAST (MonoObject, (VALH)); \ MONO_HANDLE_SUPPRESS (mono_gc_wbarrier_generic_store_internal (mono_handle_unsafe_field_addr (__obj, __field), MONO_HANDLE_RAW (__value))); \ } while (0) #define MONO_HANDLE_GET_CLASS(handle) (MONO_HANDLE_GETVAL (MONO_HANDLE_CAST (MonoObject, (handle)), vtable)->klass) /* Baked typed handles we all want */ TYPED_HANDLE_DECL (MonoString); TYPED_HANDLE_DECL (MonoArray); TYPED_HANDLE_DECL (MonoObject); TYPED_HANDLE_DECL (MonoException); TYPED_HANDLE_DECL (MonoAppContext); /* Simpler version of MONO_HANDLE_NEW if the handle is not used */ #define MONO_HANDLE_PIN(object) do { \ if ((object) != NULL) \ MONO_HANDLE_NEW (MonoObject, (MonoObject*)(object)); \ } while (0) // Structs cannot be cast to structs. // As well, a function is needed because an anonymous struct cannot be initialized in C. static inline MonoObjectHandle mono_handle_cast (gpointer a) { return *(MonoObjectHandle*)&a; } static inline MONO_ALWAYS_INLINE gboolean mono_handle_is_null (MonoRawHandle raw_handle) { MONO_HANDLE_SUPPRESS_SCOPE (1); MonoObjectHandle *handle = (MonoObjectHandle*)&raw_handle; return !handle->__raw || !*handle->__raw; } static inline MONO_ALWAYS_INLINE gpointer mono_handle_raw (MonoRawHandle raw_handle) { MONO_HANDLE_SUPPRESS_SCOPE (1); MonoObjectHandle *handle = (MonoObjectHandle*)&raw_handle; return handle->__raw ? *handle->__raw : NULL; } /* Unfortunately MonoThreadHandle is already a typedef used for something unrelated. So * the coop handle for MonoThread* is MonoThreadObjectHandle. */ typedef MonoThread MonoThreadObject; TYPED_HANDLE_DECL (MonoThreadObject); /* This is the constant for a handle that points nowhere. Constant handles may be initialized to it, but non-constant handles must be NEW'ed. Uses of these are suspicious and should be reviewed and probably changed FIXME. */ #define NULL_HANDLE (mono_null_value_handle ()) #define NULL_HANDLE_INIT { 0 } static inline MonoObjectHandle mono_null_value_handle (void) { MonoObjectHandle result = NULL_HANDLE_INIT; return result; } #define NULL_HANDLE_STRING (MONO_HANDLE_CAST (MonoString, NULL_HANDLE)) #define NULL_HANDLE_ARRAY (MONO_HANDLE_CAST (MonoArray, NULL_HANDLE)) #define NULL_HANDLE_STRING_BUILDER (MONO_HANDLE_CAST (MonoStringBuilder, NULL_HANDLE)) #if __cplusplus // Use this to convert a THandle to a raw T** such as for a ref or out parameter, without // copying back and forth through an intermediate. The handle must already be allocated, // such as icall marshaling does for out and ref parameters. 
#define MONO_HANDLE_REF(h) (h.Ref ()) #else static inline void volatile* mono_handle_ref (void volatile* p) { g_assert (p); return p; } // Use this to convert a THandle to a raw T** such as for a ref or out parameter, without // copying back and forth through an intermediate. The handle must already be allocated, // such as icall marshaling does for out and ref parameters. #define MONO_HANDLE_REF(handle) (MONO_TYPEOF_CAST ((handle).__raw, mono_handle_ref ((handle).__raw))) #endif static inline MonoObjectHandle mono_handle_assign_raw (MonoObjectHandleOut dest, void *src) { g_assert (dest.__raw); MONO_HANDLE_SUPPRESS (*dest.__raw = (MonoObject*)src); return dest; } /* It is unsafe to call this function directly - it does not pin the handle! Use MONO_HANDLE_GET_FIELD_VAL(). */ static inline gpointer mono_handle_unsafe_field_addr (MonoObjectHandle h, MonoClassField *field) { return MONO_HANDLE_SUPPRESS (((gchar *)MONO_HANDLE_RAW (h)) + field->offset); } /* Matches ObjectHandleOnStack in managed code */ typedef MonoObject **MonoObjectHandleOnStack; #define HANDLE_ON_STACK_SET(handle, obj) do { \ *(handle) = (MonoObject*)obj; \ } while (0) //FIXME this should go somewhere else MonoStringHandle mono_string_new_handle (const char *data, MonoError *error); MonoArrayHandle mono_array_new_handle (MonoClass *eclass, uintptr_t n, MonoError *error); MonoArrayHandle mono_array_new_full_handle (MonoClass *array_class, uintptr_t *lengths, intptr_t *lower_bounds, MonoError *error); #define mono_array_handle_setref(array,index,value) MONO_HANDLE_ARRAY_SETREF ((array), (index), (value)) void mono_handle_array_getref (MonoObjectHandleOut dest, MonoArrayHandle array, uintptr_t index); #define mono_handle_class(o) MONO_HANDLE_SUPPRESS (mono_object_class (MONO_HANDLE_RAW (MONO_HANDLE_UNSUPPRESS (o)))) #define mono_handle_vtable(o) MONO_HANDLE_GETVAL (o, vtable) /* Local handles to global GC handles and back */ MonoGCHandle mono_gchandle_from_handle (MonoObjectHandle handle, mono_bool pinned); MonoObjectHandle mono_gchandle_get_target_handle (MonoGCHandle gchandle); gboolean mono_gchandle_target_equal (MonoGCHandle gchandle, MonoObjectHandle equal); void mono_gchandle_target_is_null_or_equal (MonoGCHandle gchandle, MonoObjectHandle equal, gboolean *is_null, gboolean *is_equal); void mono_gchandle_set_target_handle (MonoGCHandle gchandle, MonoObjectHandle obj); void mono_array_handle_memcpy_refs (MonoArrayHandle dest, uintptr_t dest_idx, MonoArrayHandle src, uintptr_t src_idx, uintptr_t len); /* Pins the MonoArray using a gchandle and returns a pointer to the * element with the given index (where each element is of the given * size. Call mono_gchandle_free to unpin. 
*/ gpointer mono_array_handle_pin_with_size (MonoArrayHandle handle, int size, uintptr_t index, MonoGCHandle *gchandle); // Returns a pointer to the element with the given index, but does not pin gpointer mono_array_handle_addr (MonoArrayHandle handle, int size, uintptr_t index); #define MONO_ARRAY_HANDLE_PIN(handle,type,index,gchandle_out) ((type*)mono_array_handle_pin_with_size (MONO_HANDLE_CAST(MonoArray,(handle)), sizeof (type), (index), (gchandle_out))) void mono_value_copy_array_handle (MonoArrayHandle dest, int dest_idx, gconstpointer src, int count); gunichar2 * mono_string_handle_pin_chars (MonoStringHandle s, MonoGCHandle *gchandle_out); gpointer mono_object_handle_pin_unbox (MonoObjectHandle boxed_valuetype_obj, MonoGCHandle *gchandle_out); static inline gpointer mono_handle_unbox_unsafe (MonoObjectHandle handle) { g_assert (m_class_is_valuetype (MONO_HANDLE_GETVAL (handle, vtable)->klass)); return MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (handle) + 1); } void mono_error_set_exception_handle (MonoError *error, MonoExceptionHandle exc); MonoGCHandle mono_gchandle_new_weakref_from_handle (MonoObjectHandle handle); int mono_handle_hash (MonoObjectHandle object); MonoGCHandle mono_gchandle_new_weakref_from_handle_track_resurrection (MonoObjectHandle handle); #endif /* __MONO_HANDLE_H__ */
-1
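Aside (editorial illustration, not part of the dataset rows above or below): the comments inside handle.h describe the coop-handle usage pattern — HANDLE_FUNCTION_ENTER / HANDLE_FUNCTION_RETURN bracket a handle frame, MONO_HANDLE_DCL / MONO_HANDLE_NEW pin a raw object into that frame, and the MONO_HANDLE_* accessors read through the handle so no naked object pointer is live across a possible safepoint. A minimal sketch of that documented pattern follows; sample_handle_class_of is an invented helper name and is not code from this PR.

// Illustrative sketch only -- follows the pattern documented in handle.h;
// sample_handle_class_of is an invented name, not part of the PR.
static MonoClass*
sample_handle_class_of (MonoObject *obj_raw)
{
	HANDLE_FUNCTION_ENTER ();                    // open a handle frame
	MONO_HANDLE_DCL (MonoObject, obj);           // pins obj_raw as MonoObjectHandle "obj"
	MonoClass *klass = NULL;
	if (!MONO_HANDLE_IS_NULL (obj))
		klass = MONO_HANDLE_GET_CLASS (obj);     // vtable->klass, read through the handle
	HANDLE_FUNCTION_RETURN_VAL (klass);          // pops the frame and returns klass
}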
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/palrt/shlwapip.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // // =========================================================================== // File: shlwapi.h // // Header for ported shlwapi stuff // =========================================================================== #ifndef SHLWAPIP_H_INCLUDED #define SHLWAPIP_H_INCLUDED #define SIZEOF(x) sizeof(x) #define PRIVATE #define PUBLIC #ifndef ASSERT #define ASSERT _ASSERTE #endif #define AssertMsg(f,m) _ASSERTE(f) #define RIP(f) _ASSERTE(f) #define RIPMSG(f,m) _ASSERTE(f) #define IS_VALID_READ_BUFFER(p, t, n) (p != NULL) #define IS_VALID_WRITE_BUFFER(p, t, n) (p != NULL) #define IS_VALID_READ_PTR(p, t) IS_VALID_READ_BUFFER(p, t, 1) #define IS_VALID_WRITE_PTR(p, t) IS_VALID_WRITE_BUFFER(p, t, 1) #define IS_VALID_STRING_PTR(p, c) (p != NULL) #define IS_VALID_STRING_PTRW(p, c) (p != NULL) #endif // ! SHLWAPIP_H_INCLUDED
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // // =========================================================================== // File: shlwapi.h // // Header for ported shlwapi stuff // =========================================================================== #ifndef SHLWAPIP_H_INCLUDED #define SHLWAPIP_H_INCLUDED #define SIZEOF(x) sizeof(x) #define PRIVATE #define PUBLIC #ifndef ASSERT #define ASSERT _ASSERTE #endif #define AssertMsg(f,m) _ASSERTE(f) #define RIP(f) _ASSERTE(f) #define RIPMSG(f,m) _ASSERTE(f) #define IS_VALID_READ_BUFFER(p, t, n) (p != NULL) #define IS_VALID_WRITE_BUFFER(p, t, n) (p != NULL) #define IS_VALID_READ_PTR(p, t) IS_VALID_READ_BUFFER(p, t, 1) #define IS_VALID_WRITE_PTR(p, t) IS_VALID_WRITE_BUFFER(p, t, 1) #define IS_VALID_STRING_PTR(p, c) (p != NULL) #define IS_VALID_STRING_PTRW(p, c) (p != NULL) #endif // ! SHLWAPIP_H_INCLUDED
-1
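Aside (editorial illustration, not part of the dataset rows above or below): the validation macros in the ported shlwapip.h above all collapse to simple NULL checks, so asserting on them is equivalent to _ASSERTE(p != NULL). A hedged sketch of how a caller might use them; CopyBlob and its parameters are invented names for illustration only.

// Illustrative only: the function and parameter names are invented;
// the macros come from shlwapip.h and reduce to NULL checks.
void CopyBlob(const BYTE* src, BYTE* dst, size_t cb)
{
    ASSERT(IS_VALID_READ_BUFFER(src, BYTE, cb));   // expands to _ASSERTE(src != NULL)
    ASSERT(IS_VALID_WRITE_BUFFER(dst, BYTE, cb));  // expands to _ASSERTE(dst != NULL)
    for (size_t i = 0; i < cb; i++)
        dst[i] = src[i];
}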
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/jit/smdata.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // // Automatically generated code. DO NOT MODIFY! // To generate this file. Do "smgen.exe > SMData.cpp" // // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #include "jitpch.h" // // States in the state machine // // clang-format off const SMState g_SMStates[] = { // {term, len, lng, prev, SMOpcode and SMOpcodeName , offsets } // state ID and name { 0, 0, 0, 0, (SM_OPCODE) 0 /* noshow */, 0 }, // state 0 [invalid] { 0, 0, 0, 0, (SM_OPCODE) 0 /* noshow */, 0 }, // state 1 [start] { 1, 1, 0, 1, (SM_OPCODE) 0 /* noshow */, 0 }, // state 2 [noshow] { 1, 1, 0, 1, (SM_OPCODE) 1 /* ldarg.0 */, 372 }, // state 3 [ldarg.0] { 1, 1, 0, 1, (SM_OPCODE) 2 /* ldarg.1 */, 168 }, // state 4 [ldarg.1] { 1, 1, 0, 1, (SM_OPCODE) 3 /* ldarg.2 */, 170 }, // state 5 [ldarg.2] { 1, 1, 0, 1, (SM_OPCODE) 4 /* ldarg.3 */, 172 }, // state 6 [ldarg.3] { 1, 1, 0, 1, (SM_OPCODE) 5 /* ldloc.0 */, 0 }, // state 7 [ldloc.0] { 1, 1, 0, 1, (SM_OPCODE) 6 /* ldloc.1 */, 0 }, // state 8 [ldloc.1] { 1, 1, 0, 1, (SM_OPCODE) 7 /* ldloc.2 */, 0 }, // state 9 [ldloc.2] { 1, 1, 0, 1, (SM_OPCODE) 8 /* ldloc.3 */, 0 }, // state 10 [ldloc.3] { 1, 1, 0, 1, (SM_OPCODE) 9 /* stloc.0 */, 378 }, // state 11 [stloc.0] { 1, 1, 0, 1, (SM_OPCODE) 10 /* stloc.1 */, 378 }, // state 12 [stloc.1] { 1, 1, 0, 1, (SM_OPCODE) 11 /* stloc.2 */, 378 }, // state 13 [stloc.2] { 1, 1, 0, 1, (SM_OPCODE) 12 /* stloc.3 */, 378 }, // state 14 [stloc.3] { 1, 1, 0, 1, (SM_OPCODE) 13 /* ldarg.s */, 0 }, // state 15 [ldarg.s] { 1, 1, 0, 1, (SM_OPCODE) 14 /* ldarga.s */, 182 }, // state 16 [ldarga.s] { 1, 1, 0, 1, (SM_OPCODE) 15 /* starg.s */, 0 }, // state 17 [starg.s] { 1, 1, 0, 1, (SM_OPCODE) 16 /* ldloc.s */, 0 }, // state 18 [ldloc.s] { 1, 1, 0, 1, (SM_OPCODE) 17 /* ldloca.s */, 184 }, // state 19 [ldloca.s] { 1, 1, 0, 1, (SM_OPCODE) 18 /* stloc.s */, 0 }, // state 20 [stloc.s] { 1, 1, 0, 1, (SM_OPCODE) 19 /* ldnull */, 0 }, // state 21 [ldnull] { 1, 1, 0, 1, (SM_OPCODE) 20 /* ldc.i4.m1 */, 0 }, // state 22 [ldc.i4.m1] { 1, 1, 0, 1, (SM_OPCODE) 21 /* ldc.i4.0 */, 0 }, // state 23 [ldc.i4.0] { 1, 1, 0, 1, (SM_OPCODE) 22 /* ldc.i4.1 */, 0 }, // state 24 [ldc.i4.1] { 1, 1, 0, 1, (SM_OPCODE) 23 /* ldc.i4.2 */, 0 }, // state 25 [ldc.i4.2] { 1, 1, 0, 1, (SM_OPCODE) 24 /* ldc.i4.3 */, 0 }, // state 26 [ldc.i4.3] { 1, 1, 0, 1, (SM_OPCODE) 25 /* ldc.i4.4 */, 0 }, // state 27 [ldc.i4.4] { 1, 1, 0, 1, (SM_OPCODE) 26 /* ldc.i4.5 */, 0 }, // state 28 [ldc.i4.5] { 1, 1, 0, 1, (SM_OPCODE) 27 /* ldc.i4.6 */, 0 }, // state 29 [ldc.i4.6] { 1, 1, 0, 1, (SM_OPCODE) 28 /* ldc.i4.7 */, 0 }, // state 30 [ldc.i4.7] { 1, 1, 0, 1, (SM_OPCODE) 29 /* ldc.i4.8 */, 0 }, // state 31 [ldc.i4.8] { 1, 1, 0, 1, (SM_OPCODE) 30 /* ldc.i4.s */, 0 }, // state 32 [ldc.i4.s] { 1, 1, 0, 1, (SM_OPCODE) 31 /* ldc.i4 */, 0 }, // state 33 [ldc.i4] { 1, 1, 0, 1, (SM_OPCODE) 32 /* ldc.i8 */, 0 }, // state 34 [ldc.i8] { 1, 1, 0, 1, (SM_OPCODE) 33 /* ldc.r4 */, 252 }, // state 35 [ldc.r4] { 1, 1, 0, 1, (SM_OPCODE) 34 /* ldc.r8 */, 268 }, // state 36 [ldc.r8] { 1, 1, 0, 1, (SM_OPCODE) 35 /* unused */, 0 }, // state 37 [unused] { 1, 1, 0, 1, (SM_OPCODE) 36 /* dup */, 0 }, // state 38 [dup] { 1, 1, 0, 1, (SM_OPCODE) 37 /* pop */, 0 }, // state 39 [pop] { 1, 1, 0, 1, (SM_OPCODE) 38 /* call */, 0 }, // state 40 [call] { 1, 1, 0, 1, (SM_OPCODE) 39 /* calli */, 0 }, // state 41 [calli] { 1, 1, 0, 1, 
(SM_OPCODE) 40 /* ret */, 0 }, // state 42 [ret] { 1, 1, 0, 1, (SM_OPCODE) 41 /* br.s */, 0 }, // state 43 [br.s] { 1, 1, 0, 1, (SM_OPCODE) 42 /* brfalse.s */, 0 }, // state 44 [brfalse.s] { 1, 1, 0, 1, (SM_OPCODE) 43 /* brtrue.s */, 0 }, // state 45 [brtrue.s] { 1, 1, 0, 1, (SM_OPCODE) 44 /* beq.s */, 0 }, // state 46 [beq.s] { 1, 1, 0, 1, (SM_OPCODE) 45 /* bge.s */, 0 }, // state 47 [bge.s] { 1, 1, 0, 1, (SM_OPCODE) 46 /* bgt.s */, 0 }, // state 48 [bgt.s] { 1, 1, 0, 1, (SM_OPCODE) 47 /* ble.s */, 0 }, // state 49 [ble.s] { 1, 1, 0, 1, (SM_OPCODE) 48 /* blt.s */, 0 }, // state 50 [blt.s] { 1, 1, 0, 1, (SM_OPCODE) 49 /* bne.un.s */, 0 }, // state 51 [bne.un.s] { 1, 1, 0, 1, (SM_OPCODE) 50 /* bge.un.s */, 0 }, // state 52 [bge.un.s] { 1, 1, 0, 1, (SM_OPCODE) 51 /* bgt.un.s */, 0 }, // state 53 [bgt.un.s] { 1, 1, 0, 1, (SM_OPCODE) 52 /* ble.un.s */, 0 }, // state 54 [ble.un.s] { 1, 1, 0, 1, (SM_OPCODE) 53 /* blt.un.s */, 0 }, // state 55 [blt.un.s] { 1, 1, 0, 1, (SM_OPCODE) 54 /* long.branch */, 0 }, // state 56 [long.branch] { 1, 1, 0, 1, (SM_OPCODE) 55 /* switch */, 0 }, // state 57 [switch] { 1, 1, 0, 1, (SM_OPCODE) 56 /* ldind.i1 */, 0 }, // state 58 [ldind.i1] { 1, 1, 0, 1, (SM_OPCODE) 57 /* ldind.u1 */, 0 }, // state 59 [ldind.u1] { 1, 1, 0, 1, (SM_OPCODE) 58 /* ldind.i2 */, 0 }, // state 60 [ldind.i2] { 1, 1, 0, 1, (SM_OPCODE) 59 /* ldind.u2 */, 0 }, // state 61 [ldind.u2] { 1, 1, 0, 1, (SM_OPCODE) 60 /* ldind.i4 */, 0 }, // state 62 [ldind.i4] { 1, 1, 0, 1, (SM_OPCODE) 61 /* ldind.u4 */, 0 }, // state 63 [ldind.u4] { 1, 1, 0, 1, (SM_OPCODE) 62 /* ldind.i8 */, 0 }, // state 64 [ldind.i8] { 1, 1, 0, 1, (SM_OPCODE) 63 /* ldind.i */, 0 }, // state 65 [ldind.i] { 1, 1, 0, 1, (SM_OPCODE) 64 /* ldind.r4 */, 0 }, // state 66 [ldind.r4] { 1, 1, 0, 1, (SM_OPCODE) 65 /* ldind.r8 */, 0 }, // state 67 [ldind.r8] { 1, 1, 0, 1, (SM_OPCODE) 66 /* ldind.ref */, 0 }, // state 68 [ldind.ref] { 1, 1, 0, 1, (SM_OPCODE) 67 /* stind.ref */, 0 }, // state 69 [stind.ref] { 1, 1, 0, 1, (SM_OPCODE) 68 /* stind.i1 */, 0 }, // state 70 [stind.i1] { 1, 1, 0, 1, (SM_OPCODE) 69 /* stind.i2 */, 0 }, // state 71 [stind.i2] { 1, 1, 0, 1, (SM_OPCODE) 70 /* stind.i4 */, 0 }, // state 72 [stind.i4] { 1, 1, 0, 1, (SM_OPCODE) 71 /* stind.i8 */, 0 }, // state 73 [stind.i8] { 1, 1, 0, 1, (SM_OPCODE) 72 /* stind.r4 */, 0 }, // state 74 [stind.r4] { 1, 1, 0, 1, (SM_OPCODE) 73 /* stind.r8 */, 0 }, // state 75 [stind.r8] { 1, 1, 0, 1, (SM_OPCODE) 74 /* add */, 0 }, // state 76 [add] { 1, 1, 0, 1, (SM_OPCODE) 75 /* sub */, 0 }, // state 77 [sub] { 1, 1, 0, 1, (SM_OPCODE) 76 /* mul */, 0 }, // state 78 [mul] { 1, 1, 0, 1, (SM_OPCODE) 77 /* div */, 0 }, // state 79 [div] { 1, 1, 0, 1, (SM_OPCODE) 78 /* div.un */, 0 }, // state 80 [div.un] { 1, 1, 0, 1, (SM_OPCODE) 79 /* rem */, 0 }, // state 81 [rem] { 1, 1, 0, 1, (SM_OPCODE) 80 /* rem.un */, 0 }, // state 82 [rem.un] { 1, 1, 0, 1, (SM_OPCODE) 81 /* and */, 0 }, // state 83 [and] { 1, 1, 0, 1, (SM_OPCODE) 82 /* or */, 0 }, // state 84 [or] { 1, 1, 0, 1, (SM_OPCODE) 83 /* xor */, 0 }, // state 85 [xor] { 1, 1, 0, 1, (SM_OPCODE) 84 /* shl */, 0 }, // state 86 [shl] { 1, 1, 0, 1, (SM_OPCODE) 85 /* shr */, 0 }, // state 87 [shr] { 1, 1, 0, 1, (SM_OPCODE) 86 /* shr.un */, 0 }, // state 88 [shr.un] { 1, 1, 0, 1, (SM_OPCODE) 87 /* neg */, 0 }, // state 89 [neg] { 1, 1, 0, 1, (SM_OPCODE) 88 /* not */, 0 }, // state 90 [not] { 1, 1, 0, 1, (SM_OPCODE) 89 /* conv.i1 */, 0 }, // state 91 [conv.i1] { 1, 1, 0, 1, (SM_OPCODE) 90 /* conv.i2 */, 0 }, // state 92 [conv.i2] { 1, 1, 0, 1, (SM_OPCODE) 
91 /* conv.i4 */, 0 }, // state 93 [conv.i4] { 1, 1, 0, 1, (SM_OPCODE) 92 /* conv.i8 */, 0 }, // state 94 [conv.i8] { 1, 1, 0, 1, (SM_OPCODE) 93 /* conv.r4 */, 276 }, // state 95 [conv.r4] { 1, 1, 0, 1, (SM_OPCODE) 94 /* conv.r8 */, 256 }, // state 96 [conv.r8] { 1, 1, 0, 1, (SM_OPCODE) 95 /* conv.u4 */, 0 }, // state 97 [conv.u4] { 1, 1, 0, 1, (SM_OPCODE) 96 /* conv.u8 */, 0 }, // state 98 [conv.u8] { 1, 1, 0, 1, (SM_OPCODE) 97 /* callvirt */, 0 }, // state 99 [callvirt] { 1, 1, 0, 1, (SM_OPCODE) 98 /* cpobj */, 0 }, // state 100 [cpobj] { 1, 1, 0, 1, (SM_OPCODE) 99 /* ldobj */, 0 }, // state 101 [ldobj] { 1, 1, 0, 1, (SM_OPCODE)100 /* ldstr */, 0 }, // state 102 [ldstr] { 1, 1, 0, 1, (SM_OPCODE)101 /* newobj */, 0 }, // state 103 [newobj] { 1, 1, 0, 1, (SM_OPCODE)102 /* castclass */, 0 }, // state 104 [castclass] { 1, 1, 0, 1, (SM_OPCODE)103 /* isinst */, 0 }, // state 105 [isinst] { 1, 1, 0, 1, (SM_OPCODE)104 /* conv.r.un */, 0 }, // state 106 [conv.r.un] { 1, 1, 0, 1, (SM_OPCODE)105 /* unbox */, 0 }, // state 107 [unbox] { 1, 1, 0, 1, (SM_OPCODE)106 /* throw */, 0 }, // state 108 [throw] { 1, 1, 0, 1, (SM_OPCODE)107 /* ldfld */, 0 }, // state 109 [ldfld] { 1, 1, 0, 1, (SM_OPCODE)108 /* ldflda */, 0 }, // state 110 [ldflda] { 1, 1, 0, 1, (SM_OPCODE)109 /* stfld */, 0 }, // state 111 [stfld] { 1, 1, 0, 1, (SM_OPCODE)110 /* ldsfld */, 0 }, // state 112 [ldsfld] { 1, 1, 0, 1, (SM_OPCODE)111 /* ldsflda */, 0 }, // state 113 [ldsflda] { 1, 1, 0, 1, (SM_OPCODE)112 /* stsfld */, 0 }, // state 114 [stsfld] { 1, 1, 0, 1, (SM_OPCODE)113 /* stobj */, 0 }, // state 115 [stobj] { 1, 1, 0, 1, (SM_OPCODE)114 /* ovf.notype.un */, 0 }, // state 116 [ovf.notype.un] { 1, 1, 0, 1, (SM_OPCODE)115 /* box */, 0 }, // state 117 [box] { 1, 1, 0, 1, (SM_OPCODE)116 /* newarr */, 0 }, // state 118 [newarr] { 1, 1, 0, 1, (SM_OPCODE)117 /* ldlen */, 0 }, // state 119 [ldlen] { 1, 1, 0, 1, (SM_OPCODE)118 /* ldelema */, 0 }, // state 120 [ldelema] { 1, 1, 0, 1, (SM_OPCODE)119 /* ldelem.i1 */, 0 }, // state 121 [ldelem.i1] { 1, 1, 0, 1, (SM_OPCODE)120 /* ldelem.u1 */, 0 }, // state 122 [ldelem.u1] { 1, 1, 0, 1, (SM_OPCODE)121 /* ldelem.i2 */, 0 }, // state 123 [ldelem.i2] { 1, 1, 0, 1, (SM_OPCODE)122 /* ldelem.u2 */, 0 }, // state 124 [ldelem.u2] { 1, 1, 0, 1, (SM_OPCODE)123 /* ldelem.i4 */, 0 }, // state 125 [ldelem.i4] { 1, 1, 0, 1, (SM_OPCODE)124 /* ldelem.u4 */, 0 }, // state 126 [ldelem.u4] { 1, 1, 0, 1, (SM_OPCODE)125 /* ldelem.i8 */, 0 }, // state 127 [ldelem.i8] { 1, 1, 0, 1, (SM_OPCODE)126 /* ldelem.i */, 0 }, // state 128 [ldelem.i] { 1, 1, 0, 1, (SM_OPCODE)127 /* ldelem.r4 */, 0 }, // state 129 [ldelem.r4] { 1, 1, 0, 1, (SM_OPCODE)128 /* ldelem.r8 */, 0 }, // state 130 [ldelem.r8] { 1, 1, 0, 1, (SM_OPCODE)129 /* ldelem.ref */, 0 }, // state 131 [ldelem.ref] { 1, 1, 0, 1, (SM_OPCODE)130 /* stelem.i */, 0 }, // state 132 [stelem.i] { 1, 1, 0, 1, (SM_OPCODE)131 /* stelem.i1 */, 0 }, // state 133 [stelem.i1] { 1, 1, 0, 1, (SM_OPCODE)132 /* stelem.i2 */, 0 }, // state 134 [stelem.i2] { 1, 1, 0, 1, (SM_OPCODE)133 /* stelem.i4 */, 0 }, // state 135 [stelem.i4] { 1, 1, 0, 1, (SM_OPCODE)134 /* stelem.i8 */, 0 }, // state 136 [stelem.i8] { 1, 1, 0, 1, (SM_OPCODE)135 /* stelem.r4 */, 0 }, // state 137 [stelem.r4] { 1, 1, 0, 1, (SM_OPCODE)136 /* stelem.r8 */, 0 }, // state 138 [stelem.r8] { 1, 1, 0, 1, (SM_OPCODE)137 /* stelem.ref */, 0 }, // state 139 [stelem.ref] { 1, 1, 0, 1, (SM_OPCODE)138 /* ldelem */, 0 }, // state 140 [ldelem] { 1, 1, 0, 1, (SM_OPCODE)139 /* stelem */, 0 }, // state 141 [stelem] { 1, 1, 0, 1, 
(SM_OPCODE)140 /* unbox.any */, 0 }, // state 142 [unbox.any] { 1, 1, 0, 1, (SM_OPCODE)141 /* conv.ovf.i1 */, 0 }, // state 143 [conv.ovf.i1] { 1, 1, 0, 1, (SM_OPCODE)142 /* conv.ovf.u1 */, 0 }, // state 144 [conv.ovf.u1] { 1, 1, 0, 1, (SM_OPCODE)143 /* conv.ovf.i2 */, 0 }, // state 145 [conv.ovf.i2] { 1, 1, 0, 1, (SM_OPCODE)144 /* conv.ovf.u2 */, 0 }, // state 146 [conv.ovf.u2] { 1, 1, 0, 1, (SM_OPCODE)145 /* conv.ovf.i4 */, 0 }, // state 147 [conv.ovf.i4] { 1, 1, 0, 1, (SM_OPCODE)146 /* conv.ovf.u4 */, 0 }, // state 148 [conv.ovf.u4] { 1, 1, 0, 1, (SM_OPCODE)147 /* conv.ovf.i8 */, 0 }, // state 149 [conv.ovf.i8] { 1, 1, 0, 1, (SM_OPCODE)148 /* conv.ovf.u8 */, 0 }, // state 150 [conv.ovf.u8] { 1, 1, 0, 1, (SM_OPCODE)149 /* refanyval */, 0 }, // state 151 [refanyval] { 1, 1, 0, 1, (SM_OPCODE)150 /* ckfinite */, 0 }, // state 152 [ckfinite] { 1, 1, 0, 1, (SM_OPCODE)151 /* mkrefany */, 0 }, // state 153 [mkrefany] { 1, 1, 0, 1, (SM_OPCODE)152 /* ldtoken */, 0 }, // state 154 [ldtoken] { 1, 1, 0, 1, (SM_OPCODE)153 /* conv.u2 */, 0 }, // state 155 [conv.u2] { 1, 1, 0, 1, (SM_OPCODE)154 /* conv.u1 */, 0 }, // state 156 [conv.u1] { 1, 1, 0, 1, (SM_OPCODE)155 /* conv.i */, 0 }, // state 157 [conv.i] { 1, 1, 0, 1, (SM_OPCODE)156 /* conv.ovf.i */, 0 }, // state 158 [conv.ovf.i] { 1, 1, 0, 1, (SM_OPCODE)157 /* conv.ovf.u */, 0 }, // state 159 [conv.ovf.u] { 1, 1, 0, 1, (SM_OPCODE)158 /* add.ovf */, 0 }, // state 160 [add.ovf] { 1, 1, 0, 1, (SM_OPCODE)159 /* mul.ovf */, 0 }, // state 161 [mul.ovf] { 1, 1, 0, 1, (SM_OPCODE)160 /* sub.ovf */, 0 }, // state 162 [sub.ovf] { 1, 1, 0, 1, (SM_OPCODE)161 /* leave.s */, 0 }, // state 163 [leave.s] { 1, 1, 0, 1, (SM_OPCODE)162 /* stind.i */, 0 }, // state 164 [stind.i] { 1, 1, 0, 1, (SM_OPCODE)163 /* conv.u */, 0 }, // state 165 [conv.u] { 1, 1, 0, 1, (SM_OPCODE)164 /* prefix.n */, 0 }, // state 166 [prefix.n] { 1, 1, 0, 1, (SM_OPCODE)165 /* arglist */, 0 }, // state 167 [arglist] { 1, 1, 0, 1, (SM_OPCODE)166 /* ceq */, 0 }, // state 168 [ceq] { 1, 1, 0, 1, (SM_OPCODE)167 /* cgt */, 0 }, // state 169 [cgt] { 1, 1, 0, 1, (SM_OPCODE)168 /* cgt.un */, 0 }, // state 170 [cgt.un] { 1, 1, 0, 1, (SM_OPCODE)169 /* clt */, 0 }, // state 171 [clt] { 1, 1, 0, 1, (SM_OPCODE)170 /* clt.un */, 0 }, // state 172 [clt.un] { 1, 1, 0, 1, (SM_OPCODE)171 /* ldftn */, 0 }, // state 173 [ldftn] { 1, 1, 0, 1, (SM_OPCODE)172 /* ldvirtftn */, 0 }, // state 174 [ldvirtftn] { 1, 1, 0, 1, (SM_OPCODE)173 /* long.loc.arg */, 0 }, // state 175 [long.loc.arg] { 1, 1, 0, 1, (SM_OPCODE)174 /* localloc */, 0 }, // state 176 [localloc] { 1, 1, 0, 1, (SM_OPCODE)175 /* unaligned */, 0 }, // state 177 [unaligned] { 1, 1, 0, 1, (SM_OPCODE)176 /* volatile */, 0 }, // state 178 [volatile] { 1, 1, 0, 1, (SM_OPCODE)177 /* tailcall */, 0 }, // state 179 [tailcall] { 1, 1, 0, 1, (SM_OPCODE)178 /* initobj */, 0 }, // state 180 [initobj] { 1, 1, 0, 1, (SM_OPCODE)179 /* constrained */, 218 }, // state 181 [constrained] { 1, 1, 0, 1, (SM_OPCODE)180 /* cpblk */, 0 }, // state 182 [cpblk] { 1, 1, 0, 1, (SM_OPCODE)181 /* initblk */, 0 }, // state 183 [initblk] { 1, 1, 0, 1, (SM_OPCODE)182 /* rethrow */, 0 }, // state 184 [rethrow] { 1, 1, 0, 1, (SM_OPCODE)183 /* sizeof */, 0 }, // state 185 [sizeof] { 1, 1, 0, 1, (SM_OPCODE)184 /* refanytype */, 0 }, // state 186 [refanytype] { 1, 1, 0, 1, (SM_OPCODE)185 /* readonly */, 0 }, // state 187 [readonly] { 1, 1, 0, 1, (SM_OPCODE)186 /* ldarga.s.normed */, 218 }, // state 188 [ldarga.s.normed] { 1, 1, 0, 1, (SM_OPCODE)187 /* ldloca.s.normed */, 220 }, // state 189 
[ldloca.s.normed] { 1, 2, 181, 181, (SM_OPCODE) 97 /* callvirt */, 0 }, // state 190 [constrained -> callvirt] { 1, 2, 3, 3, (SM_OPCODE)107 /* ldfld */, 432 }, // state 191 [ldarg.0 -> ldfld] { 1, 2, 4, 4, (SM_OPCODE)107 /* ldfld */, 0 }, // state 192 [ldarg.1 -> ldfld] { 1, 2, 5, 5, (SM_OPCODE)107 /* ldfld */, 0 }, // state 193 [ldarg.2 -> ldfld] { 1, 2, 6, 6, (SM_OPCODE)107 /* ldfld */, 0 }, // state 194 [ldarg.3 -> ldfld] { 1, 2, 16, 16, (SM_OPCODE)107 /* ldfld */, 414 }, // state 195 [ldarga.s -> ldfld] { 1, 2, 19, 19, (SM_OPCODE)107 /* ldfld */, 0 }, // state 196 [ldloca.s -> ldfld] { 1, 2, 188, 188, (SM_OPCODE)107 /* ldfld */, 0 }, // state 197 [ldarga.s.normed -> ldfld] { 1, 2, 189, 189, (SM_OPCODE)107 /* ldfld */, 0 }, // state 198 [ldloca.s.normed -> ldfld] { 1, 2, 11, 11, (SM_OPCODE) 5 /* ldloc.0 */, 0 }, // state 199 [stloc.0 -> ldloc.0] { 1, 2, 12, 12, (SM_OPCODE) 6 /* ldloc.1 */, 0 }, // state 200 [stloc.1 -> ldloc.1] { 1, 2, 13, 13, (SM_OPCODE) 7 /* ldloc.2 */, 0 }, // state 201 [stloc.2 -> ldloc.2] { 1, 2, 14, 14, (SM_OPCODE) 8 /* ldloc.3 */, 0 }, // state 202 [stloc.3 -> ldloc.3] { 1, 2, 35, 35, (SM_OPCODE) 74 /* add */, 0 }, // state 203 [ldc.r4 -> add] { 1, 2, 35, 35, (SM_OPCODE) 75 /* sub */, 0 }, // state 204 [ldc.r4 -> sub] { 1, 2, 35, 35, (SM_OPCODE) 76 /* mul */, 0 }, // state 205 [ldc.r4 -> mul] { 1, 2, 35, 35, (SM_OPCODE) 77 /* div */, 0 }, // state 206 [ldc.r4 -> div] { 1, 2, 36, 36, (SM_OPCODE) 74 /* add */, 0 }, // state 207 [ldc.r8 -> add] { 1, 2, 36, 36, (SM_OPCODE) 75 /* sub */, 0 }, // state 208 [ldc.r8 -> sub] { 1, 2, 36, 36, (SM_OPCODE) 76 /* mul */, 0 }, // state 209 [ldc.r8 -> mul] { 1, 2, 36, 36, (SM_OPCODE) 77 /* div */, 0 }, // state 210 [ldc.r8 -> div] { 1, 2, 95, 95, (SM_OPCODE) 74 /* add */, 0 }, // state 211 [conv.r4 -> add] { 1, 2, 95, 95, (SM_OPCODE) 75 /* sub */, 0 }, // state 212 [conv.r4 -> sub] { 1, 2, 95, 95, (SM_OPCODE) 76 /* mul */, 0 }, // state 213 [conv.r4 -> mul] { 1, 2, 95, 95, (SM_OPCODE) 77 /* div */, 0 }, // state 214 [conv.r4 -> div] { 1, 2, 96, 96, (SM_OPCODE) 76 /* mul */, 0 }, // state 215 [conv.r8 -> mul] { 1, 2, 96, 96, (SM_OPCODE) 77 /* div */, 0 }, // state 216 [conv.r8 -> div] { 0, 2, 3, 3, (SM_OPCODE) 21 /* ldc.i4.0 */, 228 }, // state 217 [ldarg.0 -> ldc.i4.0] { 1, 3, 3, 217, (SM_OPCODE)109 /* stfld */, 0 }, // state 218 [ldarg.0 -> ldc.i4.0 -> stfld] { 0, 2, 3, 3, (SM_OPCODE) 33 /* ldc.r4 */, 230 }, // state 219 [ldarg.0 -> ldc.r4] { 1, 3, 3, 219, (SM_OPCODE)109 /* stfld */, 0 }, // state 220 [ldarg.0 -> ldc.r4 -> stfld] { 0, 2, 3, 3, (SM_OPCODE) 34 /* ldc.r8 */, 232 }, // state 221 [ldarg.0 -> ldc.r8] { 1, 3, 3, 221, (SM_OPCODE)109 /* stfld */, 0 }, // state 222 [ldarg.0 -> ldc.r8 -> stfld] { 0, 2, 3, 3, (SM_OPCODE) 2 /* ldarg.1 */, 238 }, // state 223 [ldarg.0 -> ldarg.1] { 0, 3, 3, 223, (SM_OPCODE)107 /* ldfld */, 236 }, // state 224 [ldarg.0 -> ldarg.1 -> ldfld] { 1, 4, 3, 224, (SM_OPCODE)109 /* stfld */, 0 }, // state 225 [ldarg.0 -> ldarg.1 -> ldfld -> stfld] { 1, 3, 3, 223, (SM_OPCODE)109 /* stfld */, 0 }, // state 226 [ldarg.0 -> ldarg.1 -> stfld] { 0, 2, 3, 3, (SM_OPCODE) 3 /* ldarg.2 */, 240 }, // state 227 [ldarg.0 -> ldarg.2] { 1, 3, 3, 227, (SM_OPCODE)109 /* stfld */, 0 }, // state 228 [ldarg.0 -> ldarg.2 -> stfld] { 0, 2, 3, 3, (SM_OPCODE) 4 /* ldarg.3 */, 242 }, // state 229 [ldarg.0 -> ldarg.3] { 1, 3, 3, 229, (SM_OPCODE)109 /* stfld */, 0 }, // state 230 [ldarg.0 -> ldarg.3 -> stfld] { 0, 2, 3, 3, (SM_OPCODE) 36 /* dup */, 248 }, // state 231 [ldarg.0 -> dup] { 0, 3, 3, 231, (SM_OPCODE)107 /* ldfld */, 
460 }, // state 232 [ldarg.0 -> dup -> ldfld] { 0, 4, 3, 232, (SM_OPCODE) 2 /* ldarg.1 */, 318 }, // state 233 [ldarg.0 -> dup -> ldfld -> ldarg.1] { 0, 5, 3, 233, (SM_OPCODE) 74 /* add */, 256 }, // state 234 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> add] { 1, 6, 3, 234, (SM_OPCODE)109 /* stfld */, 0 }, // state 235 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> add -> stfld] { 0, 5, 3, 233, (SM_OPCODE) 75 /* sub */, 258 }, // state 236 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> sub] { 1, 6, 3, 236, (SM_OPCODE)109 /* stfld */, 0 }, // state 237 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> sub -> stfld] { 0, 5, 3, 233, (SM_OPCODE) 76 /* mul */, 260 }, // state 238 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> mul] { 1, 6, 3, 238, (SM_OPCODE)109 /* stfld */, 0 }, // state 239 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> mul -> stfld] { 0, 5, 3, 233, (SM_OPCODE) 77 /* div */, 262 }, // state 240 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> div] { 1, 6, 3, 240, (SM_OPCODE)109 /* stfld */, 0 }, // state 241 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> div -> stfld] { 0, 3, 191, 191, (SM_OPCODE) 2 /* ldarg.1 */, 268 }, // state 242 [ldarg.0 -> ldfld -> ldarg.1] { 0, 4, 191, 242, (SM_OPCODE)107 /* ldfld */, 336 }, // state 243 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld] { 1, 5, 191, 243, (SM_OPCODE) 74 /* add */, 0 }, // state 244 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld -> add] { 1, 5, 191, 243, (SM_OPCODE) 75 /* sub */, 0 }, // state 245 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld -> sub] { 0, 3, 195, 195, (SM_OPCODE) 14 /* ldarga.s */, 274 }, // state 246 [ldarga.s -> ldfld -> ldarga.s] { 0, 4, 195, 246, (SM_OPCODE)107 /* ldfld */, 342 }, // state 247 [ldarga.s -> ldfld -> ldarga.s -> ldfld] { 1, 5, 195, 247, (SM_OPCODE) 74 /* add */, 0 }, // state 248 [ldarga.s -> ldfld -> ldarga.s -> ldfld -> add] { 1, 5, 195, 247, (SM_OPCODE) 75 /* sub */, 0 }, // state 249 [ldarga.s -> ldfld -> ldarga.s -> ldfld -> sub] }; // clang-format on static_assert_no_msg(NUM_SM_STATES == ArrLen(g_SMStates)); const SMState* gp_SMStates = g_SMStates; // // JumpTableCells in the state machine // // clang-format off const JumpTableCell g_SMJumpTableCells[] = { // {src, dest } { 1, 2 }, // cell# 0 : state 1 [start] --(0 noshow)--> state 2 [noshow] { 1, 3 }, // cell# 1 : state 1 [start] --(1 ldarg.0)--> state 3 [ldarg.0] { 1, 4 }, // cell# 2 : state 1 [start] --(2 ldarg.1)--> state 4 [ldarg.1] { 1, 5 }, // cell# 3 : state 1 [start] --(3 ldarg.2)--> state 5 [ldarg.2] { 1, 6 }, // cell# 4 : state 1 [start] --(4 ldarg.3)--> state 6 [ldarg.3] { 1, 7 }, // cell# 5 : state 1 [start] --(5 ldloc.0)--> state 7 [ldloc.0] { 1, 8 }, // cell# 6 : state 1 [start] --(6 ldloc.1)--> state 8 [ldloc.1] { 1, 9 }, // cell# 7 : state 1 [start] --(7 ldloc.2)--> state 9 [ldloc.2] { 1, 10 }, // cell# 8 : state 1 [start] --(8 ldloc.3)--> state 10 [ldloc.3] { 1, 11 }, // cell# 9 : state 1 [start] --(9 stloc.0)--> state 11 [stloc.0] { 1, 12 }, // cell# 10 : state 1 [start] --(10 stloc.1)--> state 12 [stloc.1] { 1, 13 }, // cell# 11 : state 1 [start] --(11 stloc.2)--> state 13 [stloc.2] { 1, 14 }, // cell# 12 : state 1 [start] --(12 stloc.3)--> state 14 [stloc.3] { 1, 15 }, // cell# 13 : state 1 [start] --(13 ldarg.s)--> state 15 [ldarg.s] { 1, 16 }, // cell# 14 : state 1 [start] --(14 ldarga.s)--> state 16 [ldarga.s] { 1, 17 }, // cell# 15 : state 1 [start] --(15 starg.s)--> state 17 [starg.s] { 1, 18 }, // cell# 16 : state 1 [start] --(16 ldloc.s)--> state 18 [ldloc.s] { 1, 19 }, // cell# 17 : state 1 [start] --(17 ldloca.s)--> state 19 [ldloca.s] { 1, 20 }, // cell# 18 : state 1 [start] --(18 stloc.s)--> 
state 20 [stloc.s] { 1, 21 }, // cell# 19 : state 1 [start] --(19 ldnull)--> state 21 [ldnull] { 1, 22 }, // cell# 20 : state 1 [start] --(20 ldc.i4.m1)--> state 22 [ldc.i4.m1] { 1, 23 }, // cell# 21 : state 1 [start] --(21 ldc.i4.0)--> state 23 [ldc.i4.0] { 1, 24 }, // cell# 22 : state 1 [start] --(22 ldc.i4.1)--> state 24 [ldc.i4.1] { 1, 25 }, // cell# 23 : state 1 [start] --(23 ldc.i4.2)--> state 25 [ldc.i4.2] { 1, 26 }, // cell# 24 : state 1 [start] --(24 ldc.i4.3)--> state 26 [ldc.i4.3] { 1, 27 }, // cell# 25 : state 1 [start] --(25 ldc.i4.4)--> state 27 [ldc.i4.4] { 1, 28 }, // cell# 26 : state 1 [start] --(26 ldc.i4.5)--> state 28 [ldc.i4.5] { 1, 29 }, // cell# 27 : state 1 [start] --(27 ldc.i4.6)--> state 29 [ldc.i4.6] { 1, 30 }, // cell# 28 : state 1 [start] --(28 ldc.i4.7)--> state 30 [ldc.i4.7] { 1, 31 }, // cell# 29 : state 1 [start] --(29 ldc.i4.8)--> state 31 [ldc.i4.8] { 1, 32 }, // cell# 30 : state 1 [start] --(30 ldc.i4.s)--> state 32 [ldc.i4.s] { 1, 33 }, // cell# 31 : state 1 [start] --(31 ldc.i4)--> state 33 [ldc.i4] { 1, 34 }, // cell# 32 : state 1 [start] --(32 ldc.i8)--> state 34 [ldc.i8] { 1, 35 }, // cell# 33 : state 1 [start] --(33 ldc.r4)--> state 35 [ldc.r4] { 1, 36 }, // cell# 34 : state 1 [start] --(34 ldc.r8)--> state 36 [ldc.r8] { 1, 37 }, // cell# 35 : state 1 [start] --(35 unused)--> state 37 [unused] { 1, 38 }, // cell# 36 : state 1 [start] --(36 dup)--> state 38 [dup] { 1, 39 }, // cell# 37 : state 1 [start] --(37 pop)--> state 39 [pop] { 1, 40 }, // cell# 38 : state 1 [start] --(38 call)--> state 40 [call] { 1, 41 }, // cell# 39 : state 1 [start] --(39 calli)--> state 41 [calli] { 1, 42 }, // cell# 40 : state 1 [start] --(40 ret)--> state 42 [ret] { 1, 43 }, // cell# 41 : state 1 [start] --(41 br.s)--> state 43 [br.s] { 1, 44 }, // cell# 42 : state 1 [start] --(42 brfalse.s)--> state 44 [brfalse.s] { 1, 45 }, // cell# 43 : state 1 [start] --(43 brtrue.s)--> state 45 [brtrue.s] { 1, 46 }, // cell# 44 : state 1 [start] --(44 beq.s)--> state 46 [beq.s] { 1, 47 }, // cell# 45 : state 1 [start] --(45 bge.s)--> state 47 [bge.s] { 1, 48 }, // cell# 46 : state 1 [start] --(46 bgt.s)--> state 48 [bgt.s] { 1, 49 }, // cell# 47 : state 1 [start] --(47 ble.s)--> state 49 [ble.s] { 1, 50 }, // cell# 48 : state 1 [start] --(48 blt.s)--> state 50 [blt.s] { 1, 51 }, // cell# 49 : state 1 [start] --(49 bne.un.s)--> state 51 [bne.un.s] { 1, 52 }, // cell# 50 : state 1 [start] --(50 bge.un.s)--> state 52 [bge.un.s] { 1, 53 }, // cell# 51 : state 1 [start] --(51 bgt.un.s)--> state 53 [bgt.un.s] { 1, 54 }, // cell# 52 : state 1 [start] --(52 ble.un.s)--> state 54 [ble.un.s] { 1, 55 }, // cell# 53 : state 1 [start] --(53 blt.un.s)--> state 55 [blt.un.s] { 1, 56 }, // cell# 54 : state 1 [start] --(54 long.branch)--> state 56 [long.branch] { 1, 57 }, // cell# 55 : state 1 [start] --(55 switch)--> state 57 [switch] { 1, 58 }, // cell# 56 : state 1 [start] --(56 ldind.i1)--> state 58 [ldind.i1] { 1, 59 }, // cell# 57 : state 1 [start] --(57 ldind.u1)--> state 59 [ldind.u1] { 1, 60 }, // cell# 58 : state 1 [start] --(58 ldind.i2)--> state 60 [ldind.i2] { 1, 61 }, // cell# 59 : state 1 [start] --(59 ldind.u2)--> state 61 [ldind.u2] { 1, 62 }, // cell# 60 : state 1 [start] --(60 ldind.i4)--> state 62 [ldind.i4] { 1, 63 }, // cell# 61 : state 1 [start] --(61 ldind.u4)--> state 63 [ldind.u4] { 1, 64 }, // cell# 62 : state 1 [start] --(62 ldind.i8)--> state 64 [ldind.i8] { 1, 65 }, // cell# 63 : state 1 [start] --(63 ldind.i)--> state 65 [ldind.i] { 1, 66 }, // cell# 64 : state 1 
[start] --(64 ldind.r4)--> state 66 [ldind.r4] { 1, 67 }, // cell# 65 : state 1 [start] --(65 ldind.r8)--> state 67 [ldind.r8] { 1, 68 }, // cell# 66 : state 1 [start] --(66 ldind.ref)--> state 68 [ldind.ref] { 1, 69 }, // cell# 67 : state 1 [start] --(67 stind.ref)--> state 69 [stind.ref] { 1, 70 }, // cell# 68 : state 1 [start] --(68 stind.i1)--> state 70 [stind.i1] { 1, 71 }, // cell# 69 : state 1 [start] --(69 stind.i2)--> state 71 [stind.i2] { 1, 72 }, // cell# 70 : state 1 [start] --(70 stind.i4)--> state 72 [stind.i4] { 1, 73 }, // cell# 71 : state 1 [start] --(71 stind.i8)--> state 73 [stind.i8] { 1, 74 }, // cell# 72 : state 1 [start] --(72 stind.r4)--> state 74 [stind.r4] { 1, 75 }, // cell# 73 : state 1 [start] --(73 stind.r8)--> state 75 [stind.r8] { 1, 76 }, // cell# 74 : state 1 [start] --(74 add)--> state 76 [add] { 1, 77 }, // cell# 75 : state 1 [start] --(75 sub)--> state 77 [sub] { 1, 78 }, // cell# 76 : state 1 [start] --(76 mul)--> state 78 [mul] { 1, 79 }, // cell# 77 : state 1 [start] --(77 div)--> state 79 [div] { 1, 80 }, // cell# 78 : state 1 [start] --(78 div.un)--> state 80 [div.un] { 1, 81 }, // cell# 79 : state 1 [start] --(79 rem)--> state 81 [rem] { 1, 82 }, // cell# 80 : state 1 [start] --(80 rem.un)--> state 82 [rem.un] { 1, 83 }, // cell# 81 : state 1 [start] --(81 and)--> state 83 [and] { 1, 84 }, // cell# 82 : state 1 [start] --(82 or)--> state 84 [or] { 1, 85 }, // cell# 83 : state 1 [start] --(83 xor)--> state 85 [xor] { 1, 86 }, // cell# 84 : state 1 [start] --(84 shl)--> state 86 [shl] { 1, 87 }, // cell# 85 : state 1 [start] --(85 shr)--> state 87 [shr] { 1, 88 }, // cell# 86 : state 1 [start] --(86 shr.un)--> state 88 [shr.un] { 1, 89 }, // cell# 87 : state 1 [start] --(87 neg)--> state 89 [neg] { 1, 90 }, // cell# 88 : state 1 [start] --(88 not)--> state 90 [not] { 1, 91 }, // cell# 89 : state 1 [start] --(89 conv.i1)--> state 91 [conv.i1] { 1, 92 }, // cell# 90 : state 1 [start] --(90 conv.i2)--> state 92 [conv.i2] { 1, 93 }, // cell# 91 : state 1 [start] --(91 conv.i4)--> state 93 [conv.i4] { 1, 94 }, // cell# 92 : state 1 [start] --(92 conv.i8)--> state 94 [conv.i8] { 1, 95 }, // cell# 93 : state 1 [start] --(93 conv.r4)--> state 95 [conv.r4] { 1, 96 }, // cell# 94 : state 1 [start] --(94 conv.r8)--> state 96 [conv.r8] { 1, 97 }, // cell# 95 : state 1 [start] --(95 conv.u4)--> state 97 [conv.u4] { 1, 98 }, // cell# 96 : state 1 [start] --(96 conv.u8)--> state 98 [conv.u8] { 1, 99 }, // cell# 97 : state 1 [start] --(97 callvirt)--> state 99 [callvirt] { 1, 100 }, // cell# 98 : state 1 [start] --(98 cpobj)--> state 100 [cpobj] { 1, 101 }, // cell# 99 : state 1 [start] --(99 ldobj)--> state 101 [ldobj] { 1, 102 }, // cell# 100 : state 1 [start] --(100 ldstr)--> state 102 [ldstr] { 1, 103 }, // cell# 101 : state 1 [start] --(101 newobj)--> state 103 [newobj] { 1, 104 }, // cell# 102 : state 1 [start] --(102 castclass)--> state 104 [castclass] { 1, 105 }, // cell# 103 : state 1 [start] --(103 isinst)--> state 105 [isinst] { 1, 106 }, // cell# 104 : state 1 [start] --(104 conv.r.un)--> state 106 [conv.r.un] { 1, 107 }, // cell# 105 : state 1 [start] --(105 unbox)--> state 107 [unbox] { 1, 108 }, // cell# 106 : state 1 [start] --(106 throw)--> state 108 [throw] { 1, 109 }, // cell# 107 : state 1 [start] --(107 ldfld)--> state 109 [ldfld] { 1, 110 }, // cell# 108 : state 1 [start] --(108 ldflda)--> state 110 [ldflda] { 1, 111 }, // cell# 109 : state 1 [start] --(109 stfld)--> state 111 [stfld] { 1, 112 }, // cell# 110 : state 1 [start] --(110 
ldsfld)--> state 112 [ldsfld] { 1, 113 }, // cell# 111 : state 1 [start] --(111 ldsflda)--> state 113 [ldsflda] { 1, 114 }, // cell# 112 : state 1 [start] --(112 stsfld)--> state 114 [stsfld] { 1, 115 }, // cell# 113 : state 1 [start] --(113 stobj)--> state 115 [stobj] { 1, 116 }, // cell# 114 : state 1 [start] --(114 ovf.notype.un)--> state 116 [ovf.notype.un] { 1, 117 }, // cell# 115 : state 1 [start] --(115 box)--> state 117 [box] { 1, 118 }, // cell# 116 : state 1 [start] --(116 newarr)--> state 118 [newarr] { 1, 119 }, // cell# 117 : state 1 [start] --(117 ldlen)--> state 119 [ldlen] { 1, 120 }, // cell# 118 : state 1 [start] --(118 ldelema)--> state 120 [ldelema] { 1, 121 }, // cell# 119 : state 1 [start] --(119 ldelem.i1)--> state 121 [ldelem.i1] { 1, 122 }, // cell# 120 : state 1 [start] --(120 ldelem.u1)--> state 122 [ldelem.u1] { 1, 123 }, // cell# 121 : state 1 [start] --(121 ldelem.i2)--> state 123 [ldelem.i2] { 1, 124 }, // cell# 122 : state 1 [start] --(122 ldelem.u2)--> state 124 [ldelem.u2] { 1, 125 }, // cell# 123 : state 1 [start] --(123 ldelem.i4)--> state 125 [ldelem.i4] { 1, 126 }, // cell# 124 : state 1 [start] --(124 ldelem.u4)--> state 126 [ldelem.u4] { 1, 127 }, // cell# 125 : state 1 [start] --(125 ldelem.i8)--> state 127 [ldelem.i8] { 1, 128 }, // cell# 126 : state 1 [start] --(126 ldelem.i)--> state 128 [ldelem.i] { 1, 129 }, // cell# 127 : state 1 [start] --(127 ldelem.r4)--> state 129 [ldelem.r4] { 1, 130 }, // cell# 128 : state 1 [start] --(128 ldelem.r8)--> state 130 [ldelem.r8] { 1, 131 }, // cell# 129 : state 1 [start] --(129 ldelem.ref)--> state 131 [ldelem.ref] { 1, 132 }, // cell# 130 : state 1 [start] --(130 stelem.i)--> state 132 [stelem.i] { 1, 133 }, // cell# 131 : state 1 [start] --(131 stelem.i1)--> state 133 [stelem.i1] { 1, 134 }, // cell# 132 : state 1 [start] --(132 stelem.i2)--> state 134 [stelem.i2] { 1, 135 }, // cell# 133 : state 1 [start] --(133 stelem.i4)--> state 135 [stelem.i4] { 1, 136 }, // cell# 134 : state 1 [start] --(134 stelem.i8)--> state 136 [stelem.i8] { 1, 137 }, // cell# 135 : state 1 [start] --(135 stelem.r4)--> state 137 [stelem.r4] { 1, 138 }, // cell# 136 : state 1 [start] --(136 stelem.r8)--> state 138 [stelem.r8] { 1, 139 }, // cell# 137 : state 1 [start] --(137 stelem.ref)--> state 139 [stelem.ref] { 1, 140 }, // cell# 138 : state 1 [start] --(138 ldelem)--> state 140 [ldelem] { 1, 141 }, // cell# 139 : state 1 [start] --(139 stelem)--> state 141 [stelem] { 1, 142 }, // cell# 140 : state 1 [start] --(140 unbox.any)--> state 142 [unbox.any] { 1, 143 }, // cell# 141 : state 1 [start] --(141 conv.ovf.i1)--> state 143 [conv.ovf.i1] { 1, 144 }, // cell# 142 : state 1 [start] --(142 conv.ovf.u1)--> state 144 [conv.ovf.u1] { 1, 145 }, // cell# 143 : state 1 [start] --(143 conv.ovf.i2)--> state 145 [conv.ovf.i2] { 1, 146 }, // cell# 144 : state 1 [start] --(144 conv.ovf.u2)--> state 146 [conv.ovf.u2] { 1, 147 }, // cell# 145 : state 1 [start] --(145 conv.ovf.i4)--> state 147 [conv.ovf.i4] { 1, 148 }, // cell# 146 : state 1 [start] --(146 conv.ovf.u4)--> state 148 [conv.ovf.u4] { 1, 149 }, // cell# 147 : state 1 [start] --(147 conv.ovf.i8)--> state 149 [conv.ovf.i8] { 1, 150 }, // cell# 148 : state 1 [start] --(148 conv.ovf.u8)--> state 150 [conv.ovf.u8] { 1, 151 }, // cell# 149 : state 1 [start] --(149 refanyval)--> state 151 [refanyval] { 1, 152 }, // cell# 150 : state 1 [start] --(150 ckfinite)--> state 152 [ckfinite] { 1, 153 }, // cell# 151 : state 1 [start] --(151 mkrefany)--> state 153 [mkrefany] { 1, 154 }, // cell# 
152 : state 1 [start] --(152 ldtoken)--> state 154 [ldtoken] { 1, 155 }, // cell# 153 : state 1 [start] --(153 conv.u2)--> state 155 [conv.u2] { 1, 156 }, // cell# 154 : state 1 [start] --(154 conv.u1)--> state 156 [conv.u1] { 1, 157 }, // cell# 155 : state 1 [start] --(155 conv.i)--> state 157 [conv.i] { 1, 158 }, // cell# 156 : state 1 [start] --(156 conv.ovf.i)--> state 158 [conv.ovf.i] { 1, 159 }, // cell# 157 : state 1 [start] --(157 conv.ovf.u)--> state 159 [conv.ovf.u] { 1, 160 }, // cell# 158 : state 1 [start] --(158 add.ovf)--> state 160 [add.ovf] { 1, 161 }, // cell# 159 : state 1 [start] --(159 mul.ovf)--> state 161 [mul.ovf] { 1, 162 }, // cell# 160 : state 1 [start] --(160 sub.ovf)--> state 162 [sub.ovf] { 1, 163 }, // cell# 161 : state 1 [start] --(161 leave.s)--> state 163 [leave.s] { 1, 164 }, // cell# 162 : state 1 [start] --(162 stind.i)--> state 164 [stind.i] { 1, 165 }, // cell# 163 : state 1 [start] --(163 conv.u)--> state 165 [conv.u] { 1, 166 }, // cell# 164 : state 1 [start] --(164 prefix.n)--> state 166 [prefix.n] { 1, 167 }, // cell# 165 : state 1 [start] --(165 arglist)--> state 167 [arglist] { 1, 168 }, // cell# 166 : state 1 [start] --(166 ceq)--> state 168 [ceq] { 1, 169 }, // cell# 167 : state 1 [start] --(167 cgt)--> state 169 [cgt] { 1, 170 }, // cell# 168 : state 1 [start] --(168 cgt.un)--> state 170 [cgt.un] { 1, 171 }, // cell# 169 : state 1 [start] --(169 clt)--> state 171 [clt] { 1, 172 }, // cell# 170 : state 1 [start] --(170 clt.un)--> state 172 [clt.un] { 1, 173 }, // cell# 171 : state 1 [start] --(171 ldftn)--> state 173 [ldftn] { 1, 174 }, // cell# 172 : state 1 [start] --(172 ldvirtftn)--> state 174 [ldvirtftn] { 1, 175 }, // cell# 173 : state 1 [start] --(173 long.loc.arg)--> state 175 [long.loc.arg] { 1, 176 }, // cell# 174 : state 1 [start] --(174 localloc)--> state 176 [localloc] { 1, 177 }, // cell# 175 : state 1 [start] --(175 unaligned)--> state 177 [unaligned] { 1, 178 }, // cell# 176 : state 1 [start] --(176 volatile)--> state 178 [volatile] { 1, 179 }, // cell# 177 : state 1 [start] --(177 tailcall)--> state 179 [tailcall] { 1, 180 }, // cell# 178 : state 1 [start] --(178 initobj)--> state 180 [initobj] { 1, 181 }, // cell# 179 : state 1 [start] --(179 constrained)--> state 181 [constrained] { 1, 182 }, // cell# 180 : state 1 [start] --(180 cpblk)--> state 182 [cpblk] { 1, 183 }, // cell# 181 : state 1 [start] --(181 initblk)--> state 183 [initblk] { 1, 184 }, // cell# 182 : state 1 [start] --(182 rethrow)--> state 184 [rethrow] { 1, 185 }, // cell# 183 : state 1 [start] --(183 sizeof)--> state 185 [sizeof] { 1, 186 }, // cell# 184 : state 1 [start] --(184 refanytype)--> state 186 [refanytype] { 1, 187 }, // cell# 185 : state 1 [start] --(185 readonly)--> state 187 [readonly] { 1, 188 }, // cell# 186 : state 1 [start] --(186 ldarga.s.normed)--> state 188 [ldarga.s.normed] { 1, 189 }, // cell# 187 : state 1 [start] --(187 ldloca.s.normed)--> state 189 [ldloca.s.normed] { 3, 223 }, // cell# 188 : state 3 [ldarg.0] --(2 ldarg.1)--> state 223 [ldarg.0 -> ldarg.1] { 3, 227 }, // cell# 189 : state 3 [ldarg.0] --(3 ldarg.2)--> state 227 [ldarg.0 -> ldarg.2] { 3, 229 }, // cell# 190 : state 3 [ldarg.0] --(4 ldarg.3)--> state 229 [ldarg.0 -> ldarg.3] { 4, 192 }, // cell# 191 : state 4 [ldarg.1] --(107 ldfld)--> state 192 [ldarg.1 -> ldfld] { 5, 193 }, // cell# 192 : state 5 [ldarg.2] --(107 ldfld)--> state 193 [ldarg.2 -> ldfld] { 6, 194 }, // cell# 193 : state 6 [ldarg.3] --(107 ldfld)--> state 194 [ldarg.3 -> ldfld] { 11, 199 }, // cell# 194 : 
state 11 [stloc.0] --(5 ldloc.0)--> state 199 [stloc.0 -> ldloc.0] { 12, 200 }, // cell# 195 : state 12 [stloc.1] --(6 ldloc.1)--> state 200 [stloc.1 -> ldloc.1] { 13, 201 }, // cell# 196 : state 13 [stloc.2] --(7 ldloc.2)--> state 201 [stloc.2 -> ldloc.2] { 14, 202 }, // cell# 197 : state 14 [stloc.3] --(8 ldloc.3)--> state 202 [stloc.3 -> ldloc.3] { 16, 195 }, // cell# 198 : state 16 [ldarga.s] --(107 ldfld)--> state 195 [ldarga.s -> ldfld] { 19, 196 }, // cell# 199 : state 19 [ldloca.s] --(107 ldfld)--> state 196 [ldloca.s -> ldfld] { 35, 203 }, // cell# 200 : state 35 [ldc.r4] --(74 add)--> state 203 [ldc.r4 -> add] { 35, 204 }, // cell# 201 : state 35 [ldc.r4] --(75 sub)--> state 204 [ldc.r4 -> sub] { 35, 205 }, // cell# 202 : state 35 [ldc.r4] --(76 mul)--> state 205 [ldc.r4 -> mul] { 35, 206 }, // cell# 203 : state 35 [ldc.r4] --(77 div)--> state 206 [ldc.r4 -> div] { 96, 215 }, // cell# 204 : state 96 [conv.r8] --(76 mul)--> state 215 [conv.r8 -> mul] { 96, 216 }, // cell# 205 : state 96 [conv.r8] --(77 div)--> state 216 [conv.r8 -> div] {181, 190 }, // cell# 206 : state 181 [constrained] --(97 callvirt)--> state 190 [constrained -> callvirt] { 3, 217 }, // cell# 207 : state 3 [ldarg.0] --(21 ldc.i4.0)--> state 217 [ldarg.0 -> ldc.i4.0] { 36, 207 }, // cell# 208 : state 36 [ldc.r8] --(74 add)--> state 207 [ldc.r8 -> add] { 36, 208 }, // cell# 209 : state 36 [ldc.r8] --(75 sub)--> state 208 [ldc.r8 -> sub] { 36, 209 }, // cell# 210 : state 36 [ldc.r8] --(76 mul)--> state 209 [ldc.r8 -> mul] { 36, 210 }, // cell# 211 : state 36 [ldc.r8] --(77 div)--> state 210 [ldc.r8 -> div] { 95, 211 }, // cell# 212 : state 95 [conv.r4] --(74 add)--> state 211 [conv.r4 -> add] { 95, 212 }, // cell# 213 : state 95 [conv.r4] --(75 sub)--> state 212 [conv.r4 -> sub] { 95, 213 }, // cell# 214 : state 95 [conv.r4] --(76 mul)--> state 213 [conv.r4 -> mul] { 95, 214 }, // cell# 215 : state 95 [conv.r4] --(77 div)--> state 214 [conv.r4 -> div] {188, 197 }, // cell# 216 : state 188 [ldarga.s.normed] --(107 ldfld)--> state 197 [ldarga.s.normed -> ldfld] {189, 198 }, // cell# 217 : state 189 [ldloca.s.normed] --(107 ldfld)--> state 198 [ldloca.s.normed -> ldfld] {191, 242 }, // cell# 218 : state 191 [ldarg.0 -> ldfld] --(2 ldarg.1)--> state 242 [ldarg.0 -> ldfld -> ldarg.1] { 3, 219 }, // cell# 219 : state 3 [ldarg.0] --(33 ldc.r4)--> state 219 [ldarg.0 -> ldc.r4] { 3, 221 }, // cell# 220 : state 3 [ldarg.0] --(34 ldc.r8)--> state 221 [ldarg.0 -> ldc.r8] {195, 246 }, // cell# 221 : state 195 [ldarga.s -> ldfld] --(14 ldarga.s)--> state 246 [ldarga.s -> ldfld -> ldarga.s] { 3, 231 }, // cell# 222 : state 3 [ldarg.0] --(36 dup)--> state 231 [ldarg.0 -> dup] {217, 218 }, // cell# 223 : state 217 [ldarg.0 -> ldc.i4.0] --(109 stfld)--> state 218 [ldarg.0 -> ldc.i4.0 -> stfld] {219, 220 }, // cell# 224 : state 219 [ldarg.0 -> ldc.r4] --(109 stfld)--> state 220 [ldarg.0 -> ldc.r4 -> stfld] {221, 222 }, // cell# 225 : state 221 [ldarg.0 -> ldc.r8] --(109 stfld)--> state 222 [ldarg.0 -> ldc.r8 -> stfld] {223, 224 }, // cell# 226 : state 223 [ldarg.0 -> ldarg.1] --(107 ldfld)--> state 224 [ldarg.0 -> ldarg.1 -> ldfld] {224, 225 }, // cell# 227 : state 224 [ldarg.0 -> ldarg.1 -> ldfld] --(109 stfld)--> state 225 [ldarg.0 -> ldarg.1 -> ldfld -> stfld] {223, 226 }, // cell# 228 : state 223 [ldarg.0 -> ldarg.1] --(109 stfld)--> state 226 [ldarg.0 -> ldarg.1 -> stfld] {227, 228 }, // cell# 229 : state 227 [ldarg.0 -> ldarg.2] --(109 stfld)--> state 228 [ldarg.0 -> ldarg.2 -> stfld] {229, 230 }, // cell# 230 : state 229 
[ldarg.0 -> ldarg.3] --(109 stfld)--> state 230 [ldarg.0 -> ldarg.3 -> stfld] {231, 232 }, // cell# 231 : state 231 [ldarg.0 -> dup] --(107 ldfld)--> state 232 [ldarg.0 -> dup -> ldfld] {232, 233 }, // cell# 232 : state 232 [ldarg.0 -> dup -> ldfld] --(2 ldarg.1)--> state 233 [ldarg.0 -> dup -> ldfld -> ldarg.1] {233, 234 }, // cell# 233 : state 233 [ldarg.0 -> dup -> ldfld -> ldarg.1] --(74 add)--> state 234 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> add] {233, 236 }, // cell# 234 : state 233 [ldarg.0 -> dup -> ldfld -> ldarg.1] --(75 sub)--> state 236 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> sub] {233, 238 }, // cell# 235 : state 233 [ldarg.0 -> dup -> ldfld -> ldarg.1] --(76 mul)--> state 238 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> mul] {233, 240 }, // cell# 236 : state 233 [ldarg.0 -> dup -> ldfld -> ldarg.1] --(77 div)--> state 240 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> div] {234, 235 }, // cell# 237 : state 234 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> add] --(109 stfld)--> state 235 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> add -> stfld] {236, 237 }, // cell# 238 : state 236 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> sub] --(109 stfld)--> state 237 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> sub -> stfld] {238, 239 }, // cell# 239 : state 238 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> mul] --(109 stfld)--> state 239 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> mul -> stfld] {240, 241 }, // cell# 240 : state 240 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> div] --(109 stfld)--> state 241 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> div -> stfld] {242, 243 }, // cell# 241 : state 242 [ldarg.0 -> ldfld -> ldarg.1] --(107 ldfld)--> state 243 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld] {243, 244 }, // cell# 242 : state 243 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld] --(74 add)--> state 244 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld -> add] {243, 245 }, // cell# 243 : state 243 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld] --(75 sub)--> state 245 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld -> sub] {246, 247 }, // cell# 244 : state 246 [ldarga.s -> ldfld -> ldarga.s] --(107 ldfld)--> state 247 [ldarga.s -> ldfld -> ldarga.s -> ldfld] {247, 248 }, // cell# 245 : state 247 [ldarga.s -> ldfld -> ldarga.s -> ldfld] --(74 add)--> state 248 [ldarga.s -> ldfld -> ldarga.s -> ldfld -> add] {247, 249 }, // cell# 246 : state 247 [ldarga.s -> ldfld -> ldarga.s -> ldfld] --(75 sub)--> state 249 [ldarga.s -> ldfld -> ldarga.s -> ldfld -> sub] { 0, 0 }, // cell# 247 { 0, 0 }, // cell# 248 { 0, 0 }, // cell# 249 { 0, 0 }, // cell# 250 { 0, 0 }, // cell# 251 { 0, 0 }, // cell# 252 { 0, 0 }, // cell# 253 { 0, 0 }, // cell# 254 { 0, 0 }, // cell# 255 { 0, 0 }, // cell# 256 { 0, 0 }, // cell# 257 { 0, 0 }, // cell# 258 { 0, 0 }, // cell# 259 { 0, 0 }, // cell# 260 { 0, 0 }, // cell# 261 { 0, 0 }, // cell# 262 { 0, 0 }, // cell# 263 { 0, 0 }, // cell# 264 { 0, 0 }, // cell# 265 { 0, 0 }, // cell# 266 { 0, 0 }, // cell# 267 { 0, 0 }, // cell# 268 { 0, 0 }, // cell# 269 { 0, 0 }, // cell# 270 { 0, 0 }, // cell# 271 { 0, 0 }, // cell# 272 { 0, 0 }, // cell# 273 { 0, 0 }, // cell# 274 { 0, 0 }, // cell# 275 { 0, 0 }, // cell# 276 { 0, 0 }, // cell# 277 { 0, 0 }, // cell# 278 { 0, 0 }, // cell# 279 { 0, 0 }, // cell# 280 { 0, 0 }, // cell# 281 { 0, 0 }, // cell# 282 { 0, 0 }, // cell# 283 { 0, 0 }, // cell# 284 { 0, 0 }, // cell# 285 { 0, 0 }, // cell# 286 { 0, 0 }, // cell# 287 { 0, 0 }, // cell# 288 { 0, 0 }, // cell# 289 { 0, 0 }, // cell# 290 { 0, 0 }, // cell# 291 { 0, 0 }, // cell# 292 { 3, 191 }, // cell# 293 : state 3 [ldarg.0] --(107 ldfld)--> state 191 [ldarg.0 -> ldfld] { 
0, 0 }, // cell# 294 { 0, 0 }, // cell# 295 { 0, 0 }, // cell# 296 { 0, 0 }, // cell# 297 { 0, 0 }, // cell# 298 { 0, 0 }, // cell# 299 { 0, 0 }, // cell# 300 { 0, 0 }, // cell# 301 { 0, 0 }, // cell# 302 { 0, 0 }, // cell# 303 { 0, 0 }, // cell# 304 { 0, 0 }, // cell# 305 { 0, 0 }, // cell# 306 { 0, 0 }, // cell# 307 { 0, 0 }, // cell# 308 { 0, 0 }, // cell# 309 { 0, 0 }, // cell# 310 { 0, 0 }, // cell# 311 { 0, 0 }, // cell# 312 { 0, 0 }, // cell# 313 { 0, 0 }, // cell# 314 { 0, 0 }, // cell# 315 { 0, 0 }, // cell# 316 { 0, 0 }, // cell# 317 { 0, 0 }, // cell# 318 { 0, 0 }, // cell# 319 { 0, 0 }, // cell# 320 { 0, 0 }, // cell# 321 { 0, 0 }, // cell# 322 { 0, 0 }, // cell# 323 { 0, 0 }, // cell# 324 { 0, 0 }, // cell# 325 { 0, 0 }, // cell# 326 { 0, 0 }, // cell# 327 { 0, 0 }, // cell# 328 { 0, 0 }, // cell# 329 { 0, 0 }, // cell# 330 { 0, 0 }, // cell# 331 { 0, 0 }, // cell# 332 { 0, 0 }, // cell# 333 { 0, 0 }, // cell# 334 { 0, 0 }, // cell# 335 { 0, 0 }, // cell# 336 { 0, 0 }, // cell# 337 { 0, 0 }, // cell# 338 { 0, 0 }, // cell# 339 { 0, 0 }, // cell# 340 { 0, 0 }, // cell# 341 { 0, 0 }, // cell# 342 { 0, 0 }, // cell# 343 { 0, 0 }, // cell# 344 { 0, 0 }, // cell# 345 { 0, 0 }, // cell# 346 { 0, 0 }, // cell# 347 { 0, 0 }, // cell# 348 { 0, 0 }, // cell# 349 { 0, 0 }, // cell# 350 { 0, 0 }, // cell# 351 { 0, 0 }, // cell# 352 { 0, 0 }, // cell# 353 { 0, 0 }, // cell# 354 { 0, 0 }, // cell# 355 { 0, 0 }, // cell# 356 { 0, 0 }, // cell# 357 { 0, 0 }, // cell# 358 { 0, 0 }, // cell# 359 { 0, 0 }, // cell# 360 { 0, 0 }, // cell# 361 { 0, 0 }, // cell# 362 { 0, 0 }, // cell# 363 { 0, 0 }, // cell# 364 { 0, 0 }, // cell# 365 { 0, 0 }, // cell# 366 { 0, 0 }, // cell# 367 { 0, 0 }, // cell# 368 { 0, 0 }, // cell# 369 { 0, 0 }, // cell# 370 { 0, 0 }, // cell# 371 { 0, 0 }, // cell# 372 { 0, 0 }, // cell# 373 { 0, 0 }, // cell# 374 { 0, 0 }, // cell# 375 { 0, 0 }, // cell# 376 { 0, 0 }, // cell# 377 { 0, 0 }, // cell# 378 { 0, 0 }, // cell# 379 { 0, 0 }, // cell# 380 { 0, 0 }, // cell# 381 { 0, 0 }, // cell# 382 { 0, 0 }, // cell# 383 { 0, 0 }, // cell# 384 { 0, 0 }, // cell# 385 { 0, 0 }, // cell# 386 { 0, 0 }, // cell# 387 { 0, 0 }, // cell# 388 { 0, 0 }, // cell# 389 { 0, 0 }, // cell# 390 { 0, 0 }, // cell# 391 { 0, 0 }, // cell# 392 { 0, 0 }, // cell# 393 { 0, 0 }, // cell# 394 { 0, 0 }, // cell# 395 { 0, 0 }, // cell# 396 { 0, 0 }, // cell# 397 { 0, 0 }, // cell# 398 { 0, 0 }, // cell# 399 { 0, 0 }, // cell# 400 { 0, 0 }, // cell# 401 { 0, 0 }, // cell# 402 { 0, 0 }, // cell# 403 { 0, 0 }, // cell# 404 { 0, 0 }, // cell# 405 { 0, 0 }, // cell# 406 { 0, 0 }, // cell# 407 { 0, 0 }, // cell# 408 { 0, 0 }, // cell# 409 { 0, 0 }, // cell# 410 { 0, 0 }, // cell# 411 { 0, 0 }, // cell# 412 { 0, 0 }, // cell# 413 { 0, 0 }, // cell# 414 { 0, 0 }, // cell# 415 { 0, 0 }, // cell# 416 { 0, 0 }, // cell# 417 }; // clang-format on const JumpTableCell* gp_SMJumpTableCells = g_SMJumpTableCells;
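//
// Illustrative sketch only -- not part of the generated smdata.cpp content above or below.
// It shows one plausible way a displacement-compressed transition table shaped like
// g_SMStates / g_SMJumpTableCells could be consulted while scanning an IL opcode stream.
// The names MiniState, MiniCell, ProbeCell, AdvanceState and START_STATE are hypothetical
// stand-ins; the JIT's real types and lookup routine live elsewhere and may differ in detail.

struct MiniCell  { unsigned char srcState; unsigned char destState; };
struct MiniState { unsigned      cellByteOffset; };   // last column of each g_SMStates row

static const unsigned START_STATE = 1;                // state 1 [start] in the table above

// Locate the cell recorded for (state, opcode): the state's byte displacement into the
// shared cell array plus the opcode index. A displacement of 0 simply points at the
// start of the array (which is where the start state's per-opcode cells live).
static const MiniCell* ProbeCell(const MiniState* states, const MiniCell* cells,
                                 unsigned state, unsigned opcode)
{
    const char* base = (const char*)cells + states[state].cellByteOffset;
    return (const MiniCell*)base + opcode;
}

// Advance the machine from 'curState' on 'opcode'. Because many states share cells in
// the displacement table, a probed cell only applies when its srcState matches; when it
// does not, the sequence is assumed to restart from the start state, whose row covers
// every opcode (cells 0..187 above).
unsigned AdvanceState(const MiniState* states, const MiniCell* cells,
                      unsigned curState, unsigned opcode)
{
    const MiniCell* cell = ProbeCell(states, cells, curState, opcode);
    if (cell->srcState == curState)
    {
        return cell->destState;   // recorded edge: follow it
    }
    return ProbeCell(states, cells, START_STATE, opcode)->destState;
}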
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! // // Automatically generated code. DO NOT MODIFY! // To generate this file. Do "smgen.exe > SMData.cpp" // // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #include "jitpch.h" // // States in the state machine // // clang-format off const SMState g_SMStates[] = { // {term, len, lng, prev, SMOpcode and SMOpcodeName , offsets } // state ID and name { 0, 0, 0, 0, (SM_OPCODE) 0 /* noshow */, 0 }, // state 0 [invalid] { 0, 0, 0, 0, (SM_OPCODE) 0 /* noshow */, 0 }, // state 1 [start] { 1, 1, 0, 1, (SM_OPCODE) 0 /* noshow */, 0 }, // state 2 [noshow] { 1, 1, 0, 1, (SM_OPCODE) 1 /* ldarg.0 */, 372 }, // state 3 [ldarg.0] { 1, 1, 0, 1, (SM_OPCODE) 2 /* ldarg.1 */, 168 }, // state 4 [ldarg.1] { 1, 1, 0, 1, (SM_OPCODE) 3 /* ldarg.2 */, 170 }, // state 5 [ldarg.2] { 1, 1, 0, 1, (SM_OPCODE) 4 /* ldarg.3 */, 172 }, // state 6 [ldarg.3] { 1, 1, 0, 1, (SM_OPCODE) 5 /* ldloc.0 */, 0 }, // state 7 [ldloc.0] { 1, 1, 0, 1, (SM_OPCODE) 6 /* ldloc.1 */, 0 }, // state 8 [ldloc.1] { 1, 1, 0, 1, (SM_OPCODE) 7 /* ldloc.2 */, 0 }, // state 9 [ldloc.2] { 1, 1, 0, 1, (SM_OPCODE) 8 /* ldloc.3 */, 0 }, // state 10 [ldloc.3] { 1, 1, 0, 1, (SM_OPCODE) 9 /* stloc.0 */, 378 }, // state 11 [stloc.0] { 1, 1, 0, 1, (SM_OPCODE) 10 /* stloc.1 */, 378 }, // state 12 [stloc.1] { 1, 1, 0, 1, (SM_OPCODE) 11 /* stloc.2 */, 378 }, // state 13 [stloc.2] { 1, 1, 0, 1, (SM_OPCODE) 12 /* stloc.3 */, 378 }, // state 14 [stloc.3] { 1, 1, 0, 1, (SM_OPCODE) 13 /* ldarg.s */, 0 }, // state 15 [ldarg.s] { 1, 1, 0, 1, (SM_OPCODE) 14 /* ldarga.s */, 182 }, // state 16 [ldarga.s] { 1, 1, 0, 1, (SM_OPCODE) 15 /* starg.s */, 0 }, // state 17 [starg.s] { 1, 1, 0, 1, (SM_OPCODE) 16 /* ldloc.s */, 0 }, // state 18 [ldloc.s] { 1, 1, 0, 1, (SM_OPCODE) 17 /* ldloca.s */, 184 }, // state 19 [ldloca.s] { 1, 1, 0, 1, (SM_OPCODE) 18 /* stloc.s */, 0 }, // state 20 [stloc.s] { 1, 1, 0, 1, (SM_OPCODE) 19 /* ldnull */, 0 }, // state 21 [ldnull] { 1, 1, 0, 1, (SM_OPCODE) 20 /* ldc.i4.m1 */, 0 }, // state 22 [ldc.i4.m1] { 1, 1, 0, 1, (SM_OPCODE) 21 /* ldc.i4.0 */, 0 }, // state 23 [ldc.i4.0] { 1, 1, 0, 1, (SM_OPCODE) 22 /* ldc.i4.1 */, 0 }, // state 24 [ldc.i4.1] { 1, 1, 0, 1, (SM_OPCODE) 23 /* ldc.i4.2 */, 0 }, // state 25 [ldc.i4.2] { 1, 1, 0, 1, (SM_OPCODE) 24 /* ldc.i4.3 */, 0 }, // state 26 [ldc.i4.3] { 1, 1, 0, 1, (SM_OPCODE) 25 /* ldc.i4.4 */, 0 }, // state 27 [ldc.i4.4] { 1, 1, 0, 1, (SM_OPCODE) 26 /* ldc.i4.5 */, 0 }, // state 28 [ldc.i4.5] { 1, 1, 0, 1, (SM_OPCODE) 27 /* ldc.i4.6 */, 0 }, // state 29 [ldc.i4.6] { 1, 1, 0, 1, (SM_OPCODE) 28 /* ldc.i4.7 */, 0 }, // state 30 [ldc.i4.7] { 1, 1, 0, 1, (SM_OPCODE) 29 /* ldc.i4.8 */, 0 }, // state 31 [ldc.i4.8] { 1, 1, 0, 1, (SM_OPCODE) 30 /* ldc.i4.s */, 0 }, // state 32 [ldc.i4.s] { 1, 1, 0, 1, (SM_OPCODE) 31 /* ldc.i4 */, 0 }, // state 33 [ldc.i4] { 1, 1, 0, 1, (SM_OPCODE) 32 /* ldc.i8 */, 0 }, // state 34 [ldc.i8] { 1, 1, 0, 1, (SM_OPCODE) 33 /* ldc.r4 */, 252 }, // state 35 [ldc.r4] { 1, 1, 0, 1, (SM_OPCODE) 34 /* ldc.r8 */, 268 }, // state 36 [ldc.r8] { 1, 1, 0, 1, (SM_OPCODE) 35 /* unused */, 0 }, // state 37 [unused] { 1, 1, 0, 1, (SM_OPCODE) 36 /* dup */, 0 }, // state 38 [dup] { 1, 1, 0, 1, (SM_OPCODE) 37 /* pop */, 0 }, // state 39 [pop] { 1, 1, 0, 1, (SM_OPCODE) 38 /* call */, 0 }, // state 40 [call] { 1, 1, 0, 1, (SM_OPCODE) 39 /* calli */, 0 }, // state 41 [calli] { 1, 1, 0, 1, 
(SM_OPCODE) 40 /* ret */, 0 }, // state 42 [ret] { 1, 1, 0, 1, (SM_OPCODE) 41 /* br.s */, 0 }, // state 43 [br.s] { 1, 1, 0, 1, (SM_OPCODE) 42 /* brfalse.s */, 0 }, // state 44 [brfalse.s] { 1, 1, 0, 1, (SM_OPCODE) 43 /* brtrue.s */, 0 }, // state 45 [brtrue.s] { 1, 1, 0, 1, (SM_OPCODE) 44 /* beq.s */, 0 }, // state 46 [beq.s] { 1, 1, 0, 1, (SM_OPCODE) 45 /* bge.s */, 0 }, // state 47 [bge.s] { 1, 1, 0, 1, (SM_OPCODE) 46 /* bgt.s */, 0 }, // state 48 [bgt.s] { 1, 1, 0, 1, (SM_OPCODE) 47 /* ble.s */, 0 }, // state 49 [ble.s] { 1, 1, 0, 1, (SM_OPCODE) 48 /* blt.s */, 0 }, // state 50 [blt.s] { 1, 1, 0, 1, (SM_OPCODE) 49 /* bne.un.s */, 0 }, // state 51 [bne.un.s] { 1, 1, 0, 1, (SM_OPCODE) 50 /* bge.un.s */, 0 }, // state 52 [bge.un.s] { 1, 1, 0, 1, (SM_OPCODE) 51 /* bgt.un.s */, 0 }, // state 53 [bgt.un.s] { 1, 1, 0, 1, (SM_OPCODE) 52 /* ble.un.s */, 0 }, // state 54 [ble.un.s] { 1, 1, 0, 1, (SM_OPCODE) 53 /* blt.un.s */, 0 }, // state 55 [blt.un.s] { 1, 1, 0, 1, (SM_OPCODE) 54 /* long.branch */, 0 }, // state 56 [long.branch] { 1, 1, 0, 1, (SM_OPCODE) 55 /* switch */, 0 }, // state 57 [switch] { 1, 1, 0, 1, (SM_OPCODE) 56 /* ldind.i1 */, 0 }, // state 58 [ldind.i1] { 1, 1, 0, 1, (SM_OPCODE) 57 /* ldind.u1 */, 0 }, // state 59 [ldind.u1] { 1, 1, 0, 1, (SM_OPCODE) 58 /* ldind.i2 */, 0 }, // state 60 [ldind.i2] { 1, 1, 0, 1, (SM_OPCODE) 59 /* ldind.u2 */, 0 }, // state 61 [ldind.u2] { 1, 1, 0, 1, (SM_OPCODE) 60 /* ldind.i4 */, 0 }, // state 62 [ldind.i4] { 1, 1, 0, 1, (SM_OPCODE) 61 /* ldind.u4 */, 0 }, // state 63 [ldind.u4] { 1, 1, 0, 1, (SM_OPCODE) 62 /* ldind.i8 */, 0 }, // state 64 [ldind.i8] { 1, 1, 0, 1, (SM_OPCODE) 63 /* ldind.i */, 0 }, // state 65 [ldind.i] { 1, 1, 0, 1, (SM_OPCODE) 64 /* ldind.r4 */, 0 }, // state 66 [ldind.r4] { 1, 1, 0, 1, (SM_OPCODE) 65 /* ldind.r8 */, 0 }, // state 67 [ldind.r8] { 1, 1, 0, 1, (SM_OPCODE) 66 /* ldind.ref */, 0 }, // state 68 [ldind.ref] { 1, 1, 0, 1, (SM_OPCODE) 67 /* stind.ref */, 0 }, // state 69 [stind.ref] { 1, 1, 0, 1, (SM_OPCODE) 68 /* stind.i1 */, 0 }, // state 70 [stind.i1] { 1, 1, 0, 1, (SM_OPCODE) 69 /* stind.i2 */, 0 }, // state 71 [stind.i2] { 1, 1, 0, 1, (SM_OPCODE) 70 /* stind.i4 */, 0 }, // state 72 [stind.i4] { 1, 1, 0, 1, (SM_OPCODE) 71 /* stind.i8 */, 0 }, // state 73 [stind.i8] { 1, 1, 0, 1, (SM_OPCODE) 72 /* stind.r4 */, 0 }, // state 74 [stind.r4] { 1, 1, 0, 1, (SM_OPCODE) 73 /* stind.r8 */, 0 }, // state 75 [stind.r8] { 1, 1, 0, 1, (SM_OPCODE) 74 /* add */, 0 }, // state 76 [add] { 1, 1, 0, 1, (SM_OPCODE) 75 /* sub */, 0 }, // state 77 [sub] { 1, 1, 0, 1, (SM_OPCODE) 76 /* mul */, 0 }, // state 78 [mul] { 1, 1, 0, 1, (SM_OPCODE) 77 /* div */, 0 }, // state 79 [div] { 1, 1, 0, 1, (SM_OPCODE) 78 /* div.un */, 0 }, // state 80 [div.un] { 1, 1, 0, 1, (SM_OPCODE) 79 /* rem */, 0 }, // state 81 [rem] { 1, 1, 0, 1, (SM_OPCODE) 80 /* rem.un */, 0 }, // state 82 [rem.un] { 1, 1, 0, 1, (SM_OPCODE) 81 /* and */, 0 }, // state 83 [and] { 1, 1, 0, 1, (SM_OPCODE) 82 /* or */, 0 }, // state 84 [or] { 1, 1, 0, 1, (SM_OPCODE) 83 /* xor */, 0 }, // state 85 [xor] { 1, 1, 0, 1, (SM_OPCODE) 84 /* shl */, 0 }, // state 86 [shl] { 1, 1, 0, 1, (SM_OPCODE) 85 /* shr */, 0 }, // state 87 [shr] { 1, 1, 0, 1, (SM_OPCODE) 86 /* shr.un */, 0 }, // state 88 [shr.un] { 1, 1, 0, 1, (SM_OPCODE) 87 /* neg */, 0 }, // state 89 [neg] { 1, 1, 0, 1, (SM_OPCODE) 88 /* not */, 0 }, // state 90 [not] { 1, 1, 0, 1, (SM_OPCODE) 89 /* conv.i1 */, 0 }, // state 91 [conv.i1] { 1, 1, 0, 1, (SM_OPCODE) 90 /* conv.i2 */, 0 }, // state 92 [conv.i2] { 1, 1, 0, 1, (SM_OPCODE) 
91 /* conv.i4 */, 0 }, // state 93 [conv.i4] { 1, 1, 0, 1, (SM_OPCODE) 92 /* conv.i8 */, 0 }, // state 94 [conv.i8] { 1, 1, 0, 1, (SM_OPCODE) 93 /* conv.r4 */, 276 }, // state 95 [conv.r4] { 1, 1, 0, 1, (SM_OPCODE) 94 /* conv.r8 */, 256 }, // state 96 [conv.r8] { 1, 1, 0, 1, (SM_OPCODE) 95 /* conv.u4 */, 0 }, // state 97 [conv.u4] { 1, 1, 0, 1, (SM_OPCODE) 96 /* conv.u8 */, 0 }, // state 98 [conv.u8] { 1, 1, 0, 1, (SM_OPCODE) 97 /* callvirt */, 0 }, // state 99 [callvirt] { 1, 1, 0, 1, (SM_OPCODE) 98 /* cpobj */, 0 }, // state 100 [cpobj] { 1, 1, 0, 1, (SM_OPCODE) 99 /* ldobj */, 0 }, // state 101 [ldobj] { 1, 1, 0, 1, (SM_OPCODE)100 /* ldstr */, 0 }, // state 102 [ldstr] { 1, 1, 0, 1, (SM_OPCODE)101 /* newobj */, 0 }, // state 103 [newobj] { 1, 1, 0, 1, (SM_OPCODE)102 /* castclass */, 0 }, // state 104 [castclass] { 1, 1, 0, 1, (SM_OPCODE)103 /* isinst */, 0 }, // state 105 [isinst] { 1, 1, 0, 1, (SM_OPCODE)104 /* conv.r.un */, 0 }, // state 106 [conv.r.un] { 1, 1, 0, 1, (SM_OPCODE)105 /* unbox */, 0 }, // state 107 [unbox] { 1, 1, 0, 1, (SM_OPCODE)106 /* throw */, 0 }, // state 108 [throw] { 1, 1, 0, 1, (SM_OPCODE)107 /* ldfld */, 0 }, // state 109 [ldfld] { 1, 1, 0, 1, (SM_OPCODE)108 /* ldflda */, 0 }, // state 110 [ldflda] { 1, 1, 0, 1, (SM_OPCODE)109 /* stfld */, 0 }, // state 111 [stfld] { 1, 1, 0, 1, (SM_OPCODE)110 /* ldsfld */, 0 }, // state 112 [ldsfld] { 1, 1, 0, 1, (SM_OPCODE)111 /* ldsflda */, 0 }, // state 113 [ldsflda] { 1, 1, 0, 1, (SM_OPCODE)112 /* stsfld */, 0 }, // state 114 [stsfld] { 1, 1, 0, 1, (SM_OPCODE)113 /* stobj */, 0 }, // state 115 [stobj] { 1, 1, 0, 1, (SM_OPCODE)114 /* ovf.notype.un */, 0 }, // state 116 [ovf.notype.un] { 1, 1, 0, 1, (SM_OPCODE)115 /* box */, 0 }, // state 117 [box] { 1, 1, 0, 1, (SM_OPCODE)116 /* newarr */, 0 }, // state 118 [newarr] { 1, 1, 0, 1, (SM_OPCODE)117 /* ldlen */, 0 }, // state 119 [ldlen] { 1, 1, 0, 1, (SM_OPCODE)118 /* ldelema */, 0 }, // state 120 [ldelema] { 1, 1, 0, 1, (SM_OPCODE)119 /* ldelem.i1 */, 0 }, // state 121 [ldelem.i1] { 1, 1, 0, 1, (SM_OPCODE)120 /* ldelem.u1 */, 0 }, // state 122 [ldelem.u1] { 1, 1, 0, 1, (SM_OPCODE)121 /* ldelem.i2 */, 0 }, // state 123 [ldelem.i2] { 1, 1, 0, 1, (SM_OPCODE)122 /* ldelem.u2 */, 0 }, // state 124 [ldelem.u2] { 1, 1, 0, 1, (SM_OPCODE)123 /* ldelem.i4 */, 0 }, // state 125 [ldelem.i4] { 1, 1, 0, 1, (SM_OPCODE)124 /* ldelem.u4 */, 0 }, // state 126 [ldelem.u4] { 1, 1, 0, 1, (SM_OPCODE)125 /* ldelem.i8 */, 0 }, // state 127 [ldelem.i8] { 1, 1, 0, 1, (SM_OPCODE)126 /* ldelem.i */, 0 }, // state 128 [ldelem.i] { 1, 1, 0, 1, (SM_OPCODE)127 /* ldelem.r4 */, 0 }, // state 129 [ldelem.r4] { 1, 1, 0, 1, (SM_OPCODE)128 /* ldelem.r8 */, 0 }, // state 130 [ldelem.r8] { 1, 1, 0, 1, (SM_OPCODE)129 /* ldelem.ref */, 0 }, // state 131 [ldelem.ref] { 1, 1, 0, 1, (SM_OPCODE)130 /* stelem.i */, 0 }, // state 132 [stelem.i] { 1, 1, 0, 1, (SM_OPCODE)131 /* stelem.i1 */, 0 }, // state 133 [stelem.i1] { 1, 1, 0, 1, (SM_OPCODE)132 /* stelem.i2 */, 0 }, // state 134 [stelem.i2] { 1, 1, 0, 1, (SM_OPCODE)133 /* stelem.i4 */, 0 }, // state 135 [stelem.i4] { 1, 1, 0, 1, (SM_OPCODE)134 /* stelem.i8 */, 0 }, // state 136 [stelem.i8] { 1, 1, 0, 1, (SM_OPCODE)135 /* stelem.r4 */, 0 }, // state 137 [stelem.r4] { 1, 1, 0, 1, (SM_OPCODE)136 /* stelem.r8 */, 0 }, // state 138 [stelem.r8] { 1, 1, 0, 1, (SM_OPCODE)137 /* stelem.ref */, 0 }, // state 139 [stelem.ref] { 1, 1, 0, 1, (SM_OPCODE)138 /* ldelem */, 0 }, // state 140 [ldelem] { 1, 1, 0, 1, (SM_OPCODE)139 /* stelem */, 0 }, // state 141 [stelem] { 1, 1, 0, 1, 
(SM_OPCODE)140 /* unbox.any */, 0 }, // state 142 [unbox.any] { 1, 1, 0, 1, (SM_OPCODE)141 /* conv.ovf.i1 */, 0 }, // state 143 [conv.ovf.i1] { 1, 1, 0, 1, (SM_OPCODE)142 /* conv.ovf.u1 */, 0 }, // state 144 [conv.ovf.u1] { 1, 1, 0, 1, (SM_OPCODE)143 /* conv.ovf.i2 */, 0 }, // state 145 [conv.ovf.i2] { 1, 1, 0, 1, (SM_OPCODE)144 /* conv.ovf.u2 */, 0 }, // state 146 [conv.ovf.u2] { 1, 1, 0, 1, (SM_OPCODE)145 /* conv.ovf.i4 */, 0 }, // state 147 [conv.ovf.i4] { 1, 1, 0, 1, (SM_OPCODE)146 /* conv.ovf.u4 */, 0 }, // state 148 [conv.ovf.u4] { 1, 1, 0, 1, (SM_OPCODE)147 /* conv.ovf.i8 */, 0 }, // state 149 [conv.ovf.i8] { 1, 1, 0, 1, (SM_OPCODE)148 /* conv.ovf.u8 */, 0 }, // state 150 [conv.ovf.u8] { 1, 1, 0, 1, (SM_OPCODE)149 /* refanyval */, 0 }, // state 151 [refanyval] { 1, 1, 0, 1, (SM_OPCODE)150 /* ckfinite */, 0 }, // state 152 [ckfinite] { 1, 1, 0, 1, (SM_OPCODE)151 /* mkrefany */, 0 }, // state 153 [mkrefany] { 1, 1, 0, 1, (SM_OPCODE)152 /* ldtoken */, 0 }, // state 154 [ldtoken] { 1, 1, 0, 1, (SM_OPCODE)153 /* conv.u2 */, 0 }, // state 155 [conv.u2] { 1, 1, 0, 1, (SM_OPCODE)154 /* conv.u1 */, 0 }, // state 156 [conv.u1] { 1, 1, 0, 1, (SM_OPCODE)155 /* conv.i */, 0 }, // state 157 [conv.i] { 1, 1, 0, 1, (SM_OPCODE)156 /* conv.ovf.i */, 0 }, // state 158 [conv.ovf.i] { 1, 1, 0, 1, (SM_OPCODE)157 /* conv.ovf.u */, 0 }, // state 159 [conv.ovf.u] { 1, 1, 0, 1, (SM_OPCODE)158 /* add.ovf */, 0 }, // state 160 [add.ovf] { 1, 1, 0, 1, (SM_OPCODE)159 /* mul.ovf */, 0 }, // state 161 [mul.ovf] { 1, 1, 0, 1, (SM_OPCODE)160 /* sub.ovf */, 0 }, // state 162 [sub.ovf] { 1, 1, 0, 1, (SM_OPCODE)161 /* leave.s */, 0 }, // state 163 [leave.s] { 1, 1, 0, 1, (SM_OPCODE)162 /* stind.i */, 0 }, // state 164 [stind.i] { 1, 1, 0, 1, (SM_OPCODE)163 /* conv.u */, 0 }, // state 165 [conv.u] { 1, 1, 0, 1, (SM_OPCODE)164 /* prefix.n */, 0 }, // state 166 [prefix.n] { 1, 1, 0, 1, (SM_OPCODE)165 /* arglist */, 0 }, // state 167 [arglist] { 1, 1, 0, 1, (SM_OPCODE)166 /* ceq */, 0 }, // state 168 [ceq] { 1, 1, 0, 1, (SM_OPCODE)167 /* cgt */, 0 }, // state 169 [cgt] { 1, 1, 0, 1, (SM_OPCODE)168 /* cgt.un */, 0 }, // state 170 [cgt.un] { 1, 1, 0, 1, (SM_OPCODE)169 /* clt */, 0 }, // state 171 [clt] { 1, 1, 0, 1, (SM_OPCODE)170 /* clt.un */, 0 }, // state 172 [clt.un] { 1, 1, 0, 1, (SM_OPCODE)171 /* ldftn */, 0 }, // state 173 [ldftn] { 1, 1, 0, 1, (SM_OPCODE)172 /* ldvirtftn */, 0 }, // state 174 [ldvirtftn] { 1, 1, 0, 1, (SM_OPCODE)173 /* long.loc.arg */, 0 }, // state 175 [long.loc.arg] { 1, 1, 0, 1, (SM_OPCODE)174 /* localloc */, 0 }, // state 176 [localloc] { 1, 1, 0, 1, (SM_OPCODE)175 /* unaligned */, 0 }, // state 177 [unaligned] { 1, 1, 0, 1, (SM_OPCODE)176 /* volatile */, 0 }, // state 178 [volatile] { 1, 1, 0, 1, (SM_OPCODE)177 /* tailcall */, 0 }, // state 179 [tailcall] { 1, 1, 0, 1, (SM_OPCODE)178 /* initobj */, 0 }, // state 180 [initobj] { 1, 1, 0, 1, (SM_OPCODE)179 /* constrained */, 218 }, // state 181 [constrained] { 1, 1, 0, 1, (SM_OPCODE)180 /* cpblk */, 0 }, // state 182 [cpblk] { 1, 1, 0, 1, (SM_OPCODE)181 /* initblk */, 0 }, // state 183 [initblk] { 1, 1, 0, 1, (SM_OPCODE)182 /* rethrow */, 0 }, // state 184 [rethrow] { 1, 1, 0, 1, (SM_OPCODE)183 /* sizeof */, 0 }, // state 185 [sizeof] { 1, 1, 0, 1, (SM_OPCODE)184 /* refanytype */, 0 }, // state 186 [refanytype] { 1, 1, 0, 1, (SM_OPCODE)185 /* readonly */, 0 }, // state 187 [readonly] { 1, 1, 0, 1, (SM_OPCODE)186 /* ldarga.s.normed */, 218 }, // state 188 [ldarga.s.normed] { 1, 1, 0, 1, (SM_OPCODE)187 /* ldloca.s.normed */, 220 }, // state 189 
[ldloca.s.normed] { 1, 2, 181, 181, (SM_OPCODE) 97 /* callvirt */, 0 }, // state 190 [constrained -> callvirt] { 1, 2, 3, 3, (SM_OPCODE)107 /* ldfld */, 432 }, // state 191 [ldarg.0 -> ldfld] { 1, 2, 4, 4, (SM_OPCODE)107 /* ldfld */, 0 }, // state 192 [ldarg.1 -> ldfld] { 1, 2, 5, 5, (SM_OPCODE)107 /* ldfld */, 0 }, // state 193 [ldarg.2 -> ldfld] { 1, 2, 6, 6, (SM_OPCODE)107 /* ldfld */, 0 }, // state 194 [ldarg.3 -> ldfld] { 1, 2, 16, 16, (SM_OPCODE)107 /* ldfld */, 414 }, // state 195 [ldarga.s -> ldfld] { 1, 2, 19, 19, (SM_OPCODE)107 /* ldfld */, 0 }, // state 196 [ldloca.s -> ldfld] { 1, 2, 188, 188, (SM_OPCODE)107 /* ldfld */, 0 }, // state 197 [ldarga.s.normed -> ldfld] { 1, 2, 189, 189, (SM_OPCODE)107 /* ldfld */, 0 }, // state 198 [ldloca.s.normed -> ldfld] { 1, 2, 11, 11, (SM_OPCODE) 5 /* ldloc.0 */, 0 }, // state 199 [stloc.0 -> ldloc.0] { 1, 2, 12, 12, (SM_OPCODE) 6 /* ldloc.1 */, 0 }, // state 200 [stloc.1 -> ldloc.1] { 1, 2, 13, 13, (SM_OPCODE) 7 /* ldloc.2 */, 0 }, // state 201 [stloc.2 -> ldloc.2] { 1, 2, 14, 14, (SM_OPCODE) 8 /* ldloc.3 */, 0 }, // state 202 [stloc.3 -> ldloc.3] { 1, 2, 35, 35, (SM_OPCODE) 74 /* add */, 0 }, // state 203 [ldc.r4 -> add] { 1, 2, 35, 35, (SM_OPCODE) 75 /* sub */, 0 }, // state 204 [ldc.r4 -> sub] { 1, 2, 35, 35, (SM_OPCODE) 76 /* mul */, 0 }, // state 205 [ldc.r4 -> mul] { 1, 2, 35, 35, (SM_OPCODE) 77 /* div */, 0 }, // state 206 [ldc.r4 -> div] { 1, 2, 36, 36, (SM_OPCODE) 74 /* add */, 0 }, // state 207 [ldc.r8 -> add] { 1, 2, 36, 36, (SM_OPCODE) 75 /* sub */, 0 }, // state 208 [ldc.r8 -> sub] { 1, 2, 36, 36, (SM_OPCODE) 76 /* mul */, 0 }, // state 209 [ldc.r8 -> mul] { 1, 2, 36, 36, (SM_OPCODE) 77 /* div */, 0 }, // state 210 [ldc.r8 -> div] { 1, 2, 95, 95, (SM_OPCODE) 74 /* add */, 0 }, // state 211 [conv.r4 -> add] { 1, 2, 95, 95, (SM_OPCODE) 75 /* sub */, 0 }, // state 212 [conv.r4 -> sub] { 1, 2, 95, 95, (SM_OPCODE) 76 /* mul */, 0 }, // state 213 [conv.r4 -> mul] { 1, 2, 95, 95, (SM_OPCODE) 77 /* div */, 0 }, // state 214 [conv.r4 -> div] { 1, 2, 96, 96, (SM_OPCODE) 76 /* mul */, 0 }, // state 215 [conv.r8 -> mul] { 1, 2, 96, 96, (SM_OPCODE) 77 /* div */, 0 }, // state 216 [conv.r8 -> div] { 0, 2, 3, 3, (SM_OPCODE) 21 /* ldc.i4.0 */, 228 }, // state 217 [ldarg.0 -> ldc.i4.0] { 1, 3, 3, 217, (SM_OPCODE)109 /* stfld */, 0 }, // state 218 [ldarg.0 -> ldc.i4.0 -> stfld] { 0, 2, 3, 3, (SM_OPCODE) 33 /* ldc.r4 */, 230 }, // state 219 [ldarg.0 -> ldc.r4] { 1, 3, 3, 219, (SM_OPCODE)109 /* stfld */, 0 }, // state 220 [ldarg.0 -> ldc.r4 -> stfld] { 0, 2, 3, 3, (SM_OPCODE) 34 /* ldc.r8 */, 232 }, // state 221 [ldarg.0 -> ldc.r8] { 1, 3, 3, 221, (SM_OPCODE)109 /* stfld */, 0 }, // state 222 [ldarg.0 -> ldc.r8 -> stfld] { 0, 2, 3, 3, (SM_OPCODE) 2 /* ldarg.1 */, 238 }, // state 223 [ldarg.0 -> ldarg.1] { 0, 3, 3, 223, (SM_OPCODE)107 /* ldfld */, 236 }, // state 224 [ldarg.0 -> ldarg.1 -> ldfld] { 1, 4, 3, 224, (SM_OPCODE)109 /* stfld */, 0 }, // state 225 [ldarg.0 -> ldarg.1 -> ldfld -> stfld] { 1, 3, 3, 223, (SM_OPCODE)109 /* stfld */, 0 }, // state 226 [ldarg.0 -> ldarg.1 -> stfld] { 0, 2, 3, 3, (SM_OPCODE) 3 /* ldarg.2 */, 240 }, // state 227 [ldarg.0 -> ldarg.2] { 1, 3, 3, 227, (SM_OPCODE)109 /* stfld */, 0 }, // state 228 [ldarg.0 -> ldarg.2 -> stfld] { 0, 2, 3, 3, (SM_OPCODE) 4 /* ldarg.3 */, 242 }, // state 229 [ldarg.0 -> ldarg.3] { 1, 3, 3, 229, (SM_OPCODE)109 /* stfld */, 0 }, // state 230 [ldarg.0 -> ldarg.3 -> stfld] { 0, 2, 3, 3, (SM_OPCODE) 36 /* dup */, 248 }, // state 231 [ldarg.0 -> dup] { 0, 3, 3, 231, (SM_OPCODE)107 /* ldfld */, 
460 }, // state 232 [ldarg.0 -> dup -> ldfld] { 0, 4, 3, 232, (SM_OPCODE) 2 /* ldarg.1 */, 318 }, // state 233 [ldarg.0 -> dup -> ldfld -> ldarg.1] { 0, 5, 3, 233, (SM_OPCODE) 74 /* add */, 256 }, // state 234 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> add] { 1, 6, 3, 234, (SM_OPCODE)109 /* stfld */, 0 }, // state 235 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> add -> stfld] { 0, 5, 3, 233, (SM_OPCODE) 75 /* sub */, 258 }, // state 236 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> sub] { 1, 6, 3, 236, (SM_OPCODE)109 /* stfld */, 0 }, // state 237 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> sub -> stfld] { 0, 5, 3, 233, (SM_OPCODE) 76 /* mul */, 260 }, // state 238 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> mul] { 1, 6, 3, 238, (SM_OPCODE)109 /* stfld */, 0 }, // state 239 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> mul -> stfld] { 0, 5, 3, 233, (SM_OPCODE) 77 /* div */, 262 }, // state 240 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> div] { 1, 6, 3, 240, (SM_OPCODE)109 /* stfld */, 0 }, // state 241 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> div -> stfld] { 0, 3, 191, 191, (SM_OPCODE) 2 /* ldarg.1 */, 268 }, // state 242 [ldarg.0 -> ldfld -> ldarg.1] { 0, 4, 191, 242, (SM_OPCODE)107 /* ldfld */, 336 }, // state 243 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld] { 1, 5, 191, 243, (SM_OPCODE) 74 /* add */, 0 }, // state 244 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld -> add] { 1, 5, 191, 243, (SM_OPCODE) 75 /* sub */, 0 }, // state 245 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld -> sub] { 0, 3, 195, 195, (SM_OPCODE) 14 /* ldarga.s */, 274 }, // state 246 [ldarga.s -> ldfld -> ldarga.s] { 0, 4, 195, 246, (SM_OPCODE)107 /* ldfld */, 342 }, // state 247 [ldarga.s -> ldfld -> ldarga.s -> ldfld] { 1, 5, 195, 247, (SM_OPCODE) 74 /* add */, 0 }, // state 248 [ldarga.s -> ldfld -> ldarga.s -> ldfld -> add] { 1, 5, 195, 247, (SM_OPCODE) 75 /* sub */, 0 }, // state 249 [ldarga.s -> ldfld -> ldarga.s -> ldfld -> sub] }; // clang-format on static_assert_no_msg(NUM_SM_STATES == ArrLen(g_SMStates)); const SMState* gp_SMStates = g_SMStates; // // JumpTableCells in the state machine // // clang-format off const JumpTableCell g_SMJumpTableCells[] = { // {src, dest } { 1, 2 }, // cell# 0 : state 1 [start] --(0 noshow)--> state 2 [noshow] { 1, 3 }, // cell# 1 : state 1 [start] --(1 ldarg.0)--> state 3 [ldarg.0] { 1, 4 }, // cell# 2 : state 1 [start] --(2 ldarg.1)--> state 4 [ldarg.1] { 1, 5 }, // cell# 3 : state 1 [start] --(3 ldarg.2)--> state 5 [ldarg.2] { 1, 6 }, // cell# 4 : state 1 [start] --(4 ldarg.3)--> state 6 [ldarg.3] { 1, 7 }, // cell# 5 : state 1 [start] --(5 ldloc.0)--> state 7 [ldloc.0] { 1, 8 }, // cell# 6 : state 1 [start] --(6 ldloc.1)--> state 8 [ldloc.1] { 1, 9 }, // cell# 7 : state 1 [start] --(7 ldloc.2)--> state 9 [ldloc.2] { 1, 10 }, // cell# 8 : state 1 [start] --(8 ldloc.3)--> state 10 [ldloc.3] { 1, 11 }, // cell# 9 : state 1 [start] --(9 stloc.0)--> state 11 [stloc.0] { 1, 12 }, // cell# 10 : state 1 [start] --(10 stloc.1)--> state 12 [stloc.1] { 1, 13 }, // cell# 11 : state 1 [start] --(11 stloc.2)--> state 13 [stloc.2] { 1, 14 }, // cell# 12 : state 1 [start] --(12 stloc.3)--> state 14 [stloc.3] { 1, 15 }, // cell# 13 : state 1 [start] --(13 ldarg.s)--> state 15 [ldarg.s] { 1, 16 }, // cell# 14 : state 1 [start] --(14 ldarga.s)--> state 16 [ldarga.s] { 1, 17 }, // cell# 15 : state 1 [start] --(15 starg.s)--> state 17 [starg.s] { 1, 18 }, // cell# 16 : state 1 [start] --(16 ldloc.s)--> state 18 [ldloc.s] { 1, 19 }, // cell# 17 : state 1 [start] --(17 ldloca.s)--> state 19 [ldloca.s] { 1, 20 }, // cell# 18 : state 1 [start] --(18 stloc.s)--> 
state 20 [stloc.s] { 1, 21 }, // cell# 19 : state 1 [start] --(19 ldnull)--> state 21 [ldnull] { 1, 22 }, // cell# 20 : state 1 [start] --(20 ldc.i4.m1)--> state 22 [ldc.i4.m1] { 1, 23 }, // cell# 21 : state 1 [start] --(21 ldc.i4.0)--> state 23 [ldc.i4.0] { 1, 24 }, // cell# 22 : state 1 [start] --(22 ldc.i4.1)--> state 24 [ldc.i4.1] { 1, 25 }, // cell# 23 : state 1 [start] --(23 ldc.i4.2)--> state 25 [ldc.i4.2] { 1, 26 }, // cell# 24 : state 1 [start] --(24 ldc.i4.3)--> state 26 [ldc.i4.3] { 1, 27 }, // cell# 25 : state 1 [start] --(25 ldc.i4.4)--> state 27 [ldc.i4.4] { 1, 28 }, // cell# 26 : state 1 [start] --(26 ldc.i4.5)--> state 28 [ldc.i4.5] { 1, 29 }, // cell# 27 : state 1 [start] --(27 ldc.i4.6)--> state 29 [ldc.i4.6] { 1, 30 }, // cell# 28 : state 1 [start] --(28 ldc.i4.7)--> state 30 [ldc.i4.7] { 1, 31 }, // cell# 29 : state 1 [start] --(29 ldc.i4.8)--> state 31 [ldc.i4.8] { 1, 32 }, // cell# 30 : state 1 [start] --(30 ldc.i4.s)--> state 32 [ldc.i4.s] { 1, 33 }, // cell# 31 : state 1 [start] --(31 ldc.i4)--> state 33 [ldc.i4] { 1, 34 }, // cell# 32 : state 1 [start] --(32 ldc.i8)--> state 34 [ldc.i8] { 1, 35 }, // cell# 33 : state 1 [start] --(33 ldc.r4)--> state 35 [ldc.r4] { 1, 36 }, // cell# 34 : state 1 [start] --(34 ldc.r8)--> state 36 [ldc.r8] { 1, 37 }, // cell# 35 : state 1 [start] --(35 unused)--> state 37 [unused] { 1, 38 }, // cell# 36 : state 1 [start] --(36 dup)--> state 38 [dup] { 1, 39 }, // cell# 37 : state 1 [start] --(37 pop)--> state 39 [pop] { 1, 40 }, // cell# 38 : state 1 [start] --(38 call)--> state 40 [call] { 1, 41 }, // cell# 39 : state 1 [start] --(39 calli)--> state 41 [calli] { 1, 42 }, // cell# 40 : state 1 [start] --(40 ret)--> state 42 [ret] { 1, 43 }, // cell# 41 : state 1 [start] --(41 br.s)--> state 43 [br.s] { 1, 44 }, // cell# 42 : state 1 [start] --(42 brfalse.s)--> state 44 [brfalse.s] { 1, 45 }, // cell# 43 : state 1 [start] --(43 brtrue.s)--> state 45 [brtrue.s] { 1, 46 }, // cell# 44 : state 1 [start] --(44 beq.s)--> state 46 [beq.s] { 1, 47 }, // cell# 45 : state 1 [start] --(45 bge.s)--> state 47 [bge.s] { 1, 48 }, // cell# 46 : state 1 [start] --(46 bgt.s)--> state 48 [bgt.s] { 1, 49 }, // cell# 47 : state 1 [start] --(47 ble.s)--> state 49 [ble.s] { 1, 50 }, // cell# 48 : state 1 [start] --(48 blt.s)--> state 50 [blt.s] { 1, 51 }, // cell# 49 : state 1 [start] --(49 bne.un.s)--> state 51 [bne.un.s] { 1, 52 }, // cell# 50 : state 1 [start] --(50 bge.un.s)--> state 52 [bge.un.s] { 1, 53 }, // cell# 51 : state 1 [start] --(51 bgt.un.s)--> state 53 [bgt.un.s] { 1, 54 }, // cell# 52 : state 1 [start] --(52 ble.un.s)--> state 54 [ble.un.s] { 1, 55 }, // cell# 53 : state 1 [start] --(53 blt.un.s)--> state 55 [blt.un.s] { 1, 56 }, // cell# 54 : state 1 [start] --(54 long.branch)--> state 56 [long.branch] { 1, 57 }, // cell# 55 : state 1 [start] --(55 switch)--> state 57 [switch] { 1, 58 }, // cell# 56 : state 1 [start] --(56 ldind.i1)--> state 58 [ldind.i1] { 1, 59 }, // cell# 57 : state 1 [start] --(57 ldind.u1)--> state 59 [ldind.u1] { 1, 60 }, // cell# 58 : state 1 [start] --(58 ldind.i2)--> state 60 [ldind.i2] { 1, 61 }, // cell# 59 : state 1 [start] --(59 ldind.u2)--> state 61 [ldind.u2] { 1, 62 }, // cell# 60 : state 1 [start] --(60 ldind.i4)--> state 62 [ldind.i4] { 1, 63 }, // cell# 61 : state 1 [start] --(61 ldind.u4)--> state 63 [ldind.u4] { 1, 64 }, // cell# 62 : state 1 [start] --(62 ldind.i8)--> state 64 [ldind.i8] { 1, 65 }, // cell# 63 : state 1 [start] --(63 ldind.i)--> state 65 [ldind.i] { 1, 66 }, // cell# 64 : state 1 
[start] --(64 ldind.r4)--> state 66 [ldind.r4] { 1, 67 }, // cell# 65 : state 1 [start] --(65 ldind.r8)--> state 67 [ldind.r8] { 1, 68 }, // cell# 66 : state 1 [start] --(66 ldind.ref)--> state 68 [ldind.ref] { 1, 69 }, // cell# 67 : state 1 [start] --(67 stind.ref)--> state 69 [stind.ref] { 1, 70 }, // cell# 68 : state 1 [start] --(68 stind.i1)--> state 70 [stind.i1] { 1, 71 }, // cell# 69 : state 1 [start] --(69 stind.i2)--> state 71 [stind.i2] { 1, 72 }, // cell# 70 : state 1 [start] --(70 stind.i4)--> state 72 [stind.i4] { 1, 73 }, // cell# 71 : state 1 [start] --(71 stind.i8)--> state 73 [stind.i8] { 1, 74 }, // cell# 72 : state 1 [start] --(72 stind.r4)--> state 74 [stind.r4] { 1, 75 }, // cell# 73 : state 1 [start] --(73 stind.r8)--> state 75 [stind.r8] { 1, 76 }, // cell# 74 : state 1 [start] --(74 add)--> state 76 [add] { 1, 77 }, // cell# 75 : state 1 [start] --(75 sub)--> state 77 [sub] { 1, 78 }, // cell# 76 : state 1 [start] --(76 mul)--> state 78 [mul] { 1, 79 }, // cell# 77 : state 1 [start] --(77 div)--> state 79 [div] { 1, 80 }, // cell# 78 : state 1 [start] --(78 div.un)--> state 80 [div.un] { 1, 81 }, // cell# 79 : state 1 [start] --(79 rem)--> state 81 [rem] { 1, 82 }, // cell# 80 : state 1 [start] --(80 rem.un)--> state 82 [rem.un] { 1, 83 }, // cell# 81 : state 1 [start] --(81 and)--> state 83 [and] { 1, 84 }, // cell# 82 : state 1 [start] --(82 or)--> state 84 [or] { 1, 85 }, // cell# 83 : state 1 [start] --(83 xor)--> state 85 [xor] { 1, 86 }, // cell# 84 : state 1 [start] --(84 shl)--> state 86 [shl] { 1, 87 }, // cell# 85 : state 1 [start] --(85 shr)--> state 87 [shr] { 1, 88 }, // cell# 86 : state 1 [start] --(86 shr.un)--> state 88 [shr.un] { 1, 89 }, // cell# 87 : state 1 [start] --(87 neg)--> state 89 [neg] { 1, 90 }, // cell# 88 : state 1 [start] --(88 not)--> state 90 [not] { 1, 91 }, // cell# 89 : state 1 [start] --(89 conv.i1)--> state 91 [conv.i1] { 1, 92 }, // cell# 90 : state 1 [start] --(90 conv.i2)--> state 92 [conv.i2] { 1, 93 }, // cell# 91 : state 1 [start] --(91 conv.i4)--> state 93 [conv.i4] { 1, 94 }, // cell# 92 : state 1 [start] --(92 conv.i8)--> state 94 [conv.i8] { 1, 95 }, // cell# 93 : state 1 [start] --(93 conv.r4)--> state 95 [conv.r4] { 1, 96 }, // cell# 94 : state 1 [start] --(94 conv.r8)--> state 96 [conv.r8] { 1, 97 }, // cell# 95 : state 1 [start] --(95 conv.u4)--> state 97 [conv.u4] { 1, 98 }, // cell# 96 : state 1 [start] --(96 conv.u8)--> state 98 [conv.u8] { 1, 99 }, // cell# 97 : state 1 [start] --(97 callvirt)--> state 99 [callvirt] { 1, 100 }, // cell# 98 : state 1 [start] --(98 cpobj)--> state 100 [cpobj] { 1, 101 }, // cell# 99 : state 1 [start] --(99 ldobj)--> state 101 [ldobj] { 1, 102 }, // cell# 100 : state 1 [start] --(100 ldstr)--> state 102 [ldstr] { 1, 103 }, // cell# 101 : state 1 [start] --(101 newobj)--> state 103 [newobj] { 1, 104 }, // cell# 102 : state 1 [start] --(102 castclass)--> state 104 [castclass] { 1, 105 }, // cell# 103 : state 1 [start] --(103 isinst)--> state 105 [isinst] { 1, 106 }, // cell# 104 : state 1 [start] --(104 conv.r.un)--> state 106 [conv.r.un] { 1, 107 }, // cell# 105 : state 1 [start] --(105 unbox)--> state 107 [unbox] { 1, 108 }, // cell# 106 : state 1 [start] --(106 throw)--> state 108 [throw] { 1, 109 }, // cell# 107 : state 1 [start] --(107 ldfld)--> state 109 [ldfld] { 1, 110 }, // cell# 108 : state 1 [start] --(108 ldflda)--> state 110 [ldflda] { 1, 111 }, // cell# 109 : state 1 [start] --(109 stfld)--> state 111 [stfld] { 1, 112 }, // cell# 110 : state 1 [start] --(110 
ldsfld)--> state 112 [ldsfld] { 1, 113 }, // cell# 111 : state 1 [start] --(111 ldsflda)--> state 113 [ldsflda] { 1, 114 }, // cell# 112 : state 1 [start] --(112 stsfld)--> state 114 [stsfld] { 1, 115 }, // cell# 113 : state 1 [start] --(113 stobj)--> state 115 [stobj] { 1, 116 }, // cell# 114 : state 1 [start] --(114 ovf.notype.un)--> state 116 [ovf.notype.un] { 1, 117 }, // cell# 115 : state 1 [start] --(115 box)--> state 117 [box] { 1, 118 }, // cell# 116 : state 1 [start] --(116 newarr)--> state 118 [newarr] { 1, 119 }, // cell# 117 : state 1 [start] --(117 ldlen)--> state 119 [ldlen] { 1, 120 }, // cell# 118 : state 1 [start] --(118 ldelema)--> state 120 [ldelema] { 1, 121 }, // cell# 119 : state 1 [start] --(119 ldelem.i1)--> state 121 [ldelem.i1] { 1, 122 }, // cell# 120 : state 1 [start] --(120 ldelem.u1)--> state 122 [ldelem.u1] { 1, 123 }, // cell# 121 : state 1 [start] --(121 ldelem.i2)--> state 123 [ldelem.i2] { 1, 124 }, // cell# 122 : state 1 [start] --(122 ldelem.u2)--> state 124 [ldelem.u2] { 1, 125 }, // cell# 123 : state 1 [start] --(123 ldelem.i4)--> state 125 [ldelem.i4] { 1, 126 }, // cell# 124 : state 1 [start] --(124 ldelem.u4)--> state 126 [ldelem.u4] { 1, 127 }, // cell# 125 : state 1 [start] --(125 ldelem.i8)--> state 127 [ldelem.i8] { 1, 128 }, // cell# 126 : state 1 [start] --(126 ldelem.i)--> state 128 [ldelem.i] { 1, 129 }, // cell# 127 : state 1 [start] --(127 ldelem.r4)--> state 129 [ldelem.r4] { 1, 130 }, // cell# 128 : state 1 [start] --(128 ldelem.r8)--> state 130 [ldelem.r8] { 1, 131 }, // cell# 129 : state 1 [start] --(129 ldelem.ref)--> state 131 [ldelem.ref] { 1, 132 }, // cell# 130 : state 1 [start] --(130 stelem.i)--> state 132 [stelem.i] { 1, 133 }, // cell# 131 : state 1 [start] --(131 stelem.i1)--> state 133 [stelem.i1] { 1, 134 }, // cell# 132 : state 1 [start] --(132 stelem.i2)--> state 134 [stelem.i2] { 1, 135 }, // cell# 133 : state 1 [start] --(133 stelem.i4)--> state 135 [stelem.i4] { 1, 136 }, // cell# 134 : state 1 [start] --(134 stelem.i8)--> state 136 [stelem.i8] { 1, 137 }, // cell# 135 : state 1 [start] --(135 stelem.r4)--> state 137 [stelem.r4] { 1, 138 }, // cell# 136 : state 1 [start] --(136 stelem.r8)--> state 138 [stelem.r8] { 1, 139 }, // cell# 137 : state 1 [start] --(137 stelem.ref)--> state 139 [stelem.ref] { 1, 140 }, // cell# 138 : state 1 [start] --(138 ldelem)--> state 140 [ldelem] { 1, 141 }, // cell# 139 : state 1 [start] --(139 stelem)--> state 141 [stelem] { 1, 142 }, // cell# 140 : state 1 [start] --(140 unbox.any)--> state 142 [unbox.any] { 1, 143 }, // cell# 141 : state 1 [start] --(141 conv.ovf.i1)--> state 143 [conv.ovf.i1] { 1, 144 }, // cell# 142 : state 1 [start] --(142 conv.ovf.u1)--> state 144 [conv.ovf.u1] { 1, 145 }, // cell# 143 : state 1 [start] --(143 conv.ovf.i2)--> state 145 [conv.ovf.i2] { 1, 146 }, // cell# 144 : state 1 [start] --(144 conv.ovf.u2)--> state 146 [conv.ovf.u2] { 1, 147 }, // cell# 145 : state 1 [start] --(145 conv.ovf.i4)--> state 147 [conv.ovf.i4] { 1, 148 }, // cell# 146 : state 1 [start] --(146 conv.ovf.u4)--> state 148 [conv.ovf.u4] { 1, 149 }, // cell# 147 : state 1 [start] --(147 conv.ovf.i8)--> state 149 [conv.ovf.i8] { 1, 150 }, // cell# 148 : state 1 [start] --(148 conv.ovf.u8)--> state 150 [conv.ovf.u8] { 1, 151 }, // cell# 149 : state 1 [start] --(149 refanyval)--> state 151 [refanyval] { 1, 152 }, // cell# 150 : state 1 [start] --(150 ckfinite)--> state 152 [ckfinite] { 1, 153 }, // cell# 151 : state 1 [start] --(151 mkrefany)--> state 153 [mkrefany] { 1, 154 }, // cell# 
152 : state 1 [start] --(152 ldtoken)--> state 154 [ldtoken] { 1, 155 }, // cell# 153 : state 1 [start] --(153 conv.u2)--> state 155 [conv.u2] { 1, 156 }, // cell# 154 : state 1 [start] --(154 conv.u1)--> state 156 [conv.u1] { 1, 157 }, // cell# 155 : state 1 [start] --(155 conv.i)--> state 157 [conv.i] { 1, 158 }, // cell# 156 : state 1 [start] --(156 conv.ovf.i)--> state 158 [conv.ovf.i] { 1, 159 }, // cell# 157 : state 1 [start] --(157 conv.ovf.u)--> state 159 [conv.ovf.u] { 1, 160 }, // cell# 158 : state 1 [start] --(158 add.ovf)--> state 160 [add.ovf] { 1, 161 }, // cell# 159 : state 1 [start] --(159 mul.ovf)--> state 161 [mul.ovf] { 1, 162 }, // cell# 160 : state 1 [start] --(160 sub.ovf)--> state 162 [sub.ovf] { 1, 163 }, // cell# 161 : state 1 [start] --(161 leave.s)--> state 163 [leave.s] { 1, 164 }, // cell# 162 : state 1 [start] --(162 stind.i)--> state 164 [stind.i] { 1, 165 }, // cell# 163 : state 1 [start] --(163 conv.u)--> state 165 [conv.u] { 1, 166 }, // cell# 164 : state 1 [start] --(164 prefix.n)--> state 166 [prefix.n] { 1, 167 }, // cell# 165 : state 1 [start] --(165 arglist)--> state 167 [arglist] { 1, 168 }, // cell# 166 : state 1 [start] --(166 ceq)--> state 168 [ceq] { 1, 169 }, // cell# 167 : state 1 [start] --(167 cgt)--> state 169 [cgt] { 1, 170 }, // cell# 168 : state 1 [start] --(168 cgt.un)--> state 170 [cgt.un] { 1, 171 }, // cell# 169 : state 1 [start] --(169 clt)--> state 171 [clt] { 1, 172 }, // cell# 170 : state 1 [start] --(170 clt.un)--> state 172 [clt.un] { 1, 173 }, // cell# 171 : state 1 [start] --(171 ldftn)--> state 173 [ldftn] { 1, 174 }, // cell# 172 : state 1 [start] --(172 ldvirtftn)--> state 174 [ldvirtftn] { 1, 175 }, // cell# 173 : state 1 [start] --(173 long.loc.arg)--> state 175 [long.loc.arg] { 1, 176 }, // cell# 174 : state 1 [start] --(174 localloc)--> state 176 [localloc] { 1, 177 }, // cell# 175 : state 1 [start] --(175 unaligned)--> state 177 [unaligned] { 1, 178 }, // cell# 176 : state 1 [start] --(176 volatile)--> state 178 [volatile] { 1, 179 }, // cell# 177 : state 1 [start] --(177 tailcall)--> state 179 [tailcall] { 1, 180 }, // cell# 178 : state 1 [start] --(178 initobj)--> state 180 [initobj] { 1, 181 }, // cell# 179 : state 1 [start] --(179 constrained)--> state 181 [constrained] { 1, 182 }, // cell# 180 : state 1 [start] --(180 cpblk)--> state 182 [cpblk] { 1, 183 }, // cell# 181 : state 1 [start] --(181 initblk)--> state 183 [initblk] { 1, 184 }, // cell# 182 : state 1 [start] --(182 rethrow)--> state 184 [rethrow] { 1, 185 }, // cell# 183 : state 1 [start] --(183 sizeof)--> state 185 [sizeof] { 1, 186 }, // cell# 184 : state 1 [start] --(184 refanytype)--> state 186 [refanytype] { 1, 187 }, // cell# 185 : state 1 [start] --(185 readonly)--> state 187 [readonly] { 1, 188 }, // cell# 186 : state 1 [start] --(186 ldarga.s.normed)--> state 188 [ldarga.s.normed] { 1, 189 }, // cell# 187 : state 1 [start] --(187 ldloca.s.normed)--> state 189 [ldloca.s.normed] { 3, 223 }, // cell# 188 : state 3 [ldarg.0] --(2 ldarg.1)--> state 223 [ldarg.0 -> ldarg.1] { 3, 227 }, // cell# 189 : state 3 [ldarg.0] --(3 ldarg.2)--> state 227 [ldarg.0 -> ldarg.2] { 3, 229 }, // cell# 190 : state 3 [ldarg.0] --(4 ldarg.3)--> state 229 [ldarg.0 -> ldarg.3] { 4, 192 }, // cell# 191 : state 4 [ldarg.1] --(107 ldfld)--> state 192 [ldarg.1 -> ldfld] { 5, 193 }, // cell# 192 : state 5 [ldarg.2] --(107 ldfld)--> state 193 [ldarg.2 -> ldfld] { 6, 194 }, // cell# 193 : state 6 [ldarg.3] --(107 ldfld)--> state 194 [ldarg.3 -> ldfld] { 11, 199 }, // cell# 194 : 
state 11 [stloc.0] --(5 ldloc.0)--> state 199 [stloc.0 -> ldloc.0] { 12, 200 }, // cell# 195 : state 12 [stloc.1] --(6 ldloc.1)--> state 200 [stloc.1 -> ldloc.1] { 13, 201 }, // cell# 196 : state 13 [stloc.2] --(7 ldloc.2)--> state 201 [stloc.2 -> ldloc.2] { 14, 202 }, // cell# 197 : state 14 [stloc.3] --(8 ldloc.3)--> state 202 [stloc.3 -> ldloc.3] { 16, 195 }, // cell# 198 : state 16 [ldarga.s] --(107 ldfld)--> state 195 [ldarga.s -> ldfld] { 19, 196 }, // cell# 199 : state 19 [ldloca.s] --(107 ldfld)--> state 196 [ldloca.s -> ldfld] { 35, 203 }, // cell# 200 : state 35 [ldc.r4] --(74 add)--> state 203 [ldc.r4 -> add] { 35, 204 }, // cell# 201 : state 35 [ldc.r4] --(75 sub)--> state 204 [ldc.r4 -> sub] { 35, 205 }, // cell# 202 : state 35 [ldc.r4] --(76 mul)--> state 205 [ldc.r4 -> mul] { 35, 206 }, // cell# 203 : state 35 [ldc.r4] --(77 div)--> state 206 [ldc.r4 -> div] { 96, 215 }, // cell# 204 : state 96 [conv.r8] --(76 mul)--> state 215 [conv.r8 -> mul] { 96, 216 }, // cell# 205 : state 96 [conv.r8] --(77 div)--> state 216 [conv.r8 -> div] {181, 190 }, // cell# 206 : state 181 [constrained] --(97 callvirt)--> state 190 [constrained -> callvirt] { 3, 217 }, // cell# 207 : state 3 [ldarg.0] --(21 ldc.i4.0)--> state 217 [ldarg.0 -> ldc.i4.0] { 36, 207 }, // cell# 208 : state 36 [ldc.r8] --(74 add)--> state 207 [ldc.r8 -> add] { 36, 208 }, // cell# 209 : state 36 [ldc.r8] --(75 sub)--> state 208 [ldc.r8 -> sub] { 36, 209 }, // cell# 210 : state 36 [ldc.r8] --(76 mul)--> state 209 [ldc.r8 -> mul] { 36, 210 }, // cell# 211 : state 36 [ldc.r8] --(77 div)--> state 210 [ldc.r8 -> div] { 95, 211 }, // cell# 212 : state 95 [conv.r4] --(74 add)--> state 211 [conv.r4 -> add] { 95, 212 }, // cell# 213 : state 95 [conv.r4] --(75 sub)--> state 212 [conv.r4 -> sub] { 95, 213 }, // cell# 214 : state 95 [conv.r4] --(76 mul)--> state 213 [conv.r4 -> mul] { 95, 214 }, // cell# 215 : state 95 [conv.r4] --(77 div)--> state 214 [conv.r4 -> div] {188, 197 }, // cell# 216 : state 188 [ldarga.s.normed] --(107 ldfld)--> state 197 [ldarga.s.normed -> ldfld] {189, 198 }, // cell# 217 : state 189 [ldloca.s.normed] --(107 ldfld)--> state 198 [ldloca.s.normed -> ldfld] {191, 242 }, // cell# 218 : state 191 [ldarg.0 -> ldfld] --(2 ldarg.1)--> state 242 [ldarg.0 -> ldfld -> ldarg.1] { 3, 219 }, // cell# 219 : state 3 [ldarg.0] --(33 ldc.r4)--> state 219 [ldarg.0 -> ldc.r4] { 3, 221 }, // cell# 220 : state 3 [ldarg.0] --(34 ldc.r8)--> state 221 [ldarg.0 -> ldc.r8] {195, 246 }, // cell# 221 : state 195 [ldarga.s -> ldfld] --(14 ldarga.s)--> state 246 [ldarga.s -> ldfld -> ldarga.s] { 3, 231 }, // cell# 222 : state 3 [ldarg.0] --(36 dup)--> state 231 [ldarg.0 -> dup] {217, 218 }, // cell# 223 : state 217 [ldarg.0 -> ldc.i4.0] --(109 stfld)--> state 218 [ldarg.0 -> ldc.i4.0 -> stfld] {219, 220 }, // cell# 224 : state 219 [ldarg.0 -> ldc.r4] --(109 stfld)--> state 220 [ldarg.0 -> ldc.r4 -> stfld] {221, 222 }, // cell# 225 : state 221 [ldarg.0 -> ldc.r8] --(109 stfld)--> state 222 [ldarg.0 -> ldc.r8 -> stfld] {223, 224 }, // cell# 226 : state 223 [ldarg.0 -> ldarg.1] --(107 ldfld)--> state 224 [ldarg.0 -> ldarg.1 -> ldfld] {224, 225 }, // cell# 227 : state 224 [ldarg.0 -> ldarg.1 -> ldfld] --(109 stfld)--> state 225 [ldarg.0 -> ldarg.1 -> ldfld -> stfld] {223, 226 }, // cell# 228 : state 223 [ldarg.0 -> ldarg.1] --(109 stfld)--> state 226 [ldarg.0 -> ldarg.1 -> stfld] {227, 228 }, // cell# 229 : state 227 [ldarg.0 -> ldarg.2] --(109 stfld)--> state 228 [ldarg.0 -> ldarg.2 -> stfld] {229, 230 }, // cell# 230 : state 229 
[ldarg.0 -> ldarg.3] --(109 stfld)--> state 230 [ldarg.0 -> ldarg.3 -> stfld] {231, 232 }, // cell# 231 : state 231 [ldarg.0 -> dup] --(107 ldfld)--> state 232 [ldarg.0 -> dup -> ldfld] {232, 233 }, // cell# 232 : state 232 [ldarg.0 -> dup -> ldfld] --(2 ldarg.1)--> state 233 [ldarg.0 -> dup -> ldfld -> ldarg.1] {233, 234 }, // cell# 233 : state 233 [ldarg.0 -> dup -> ldfld -> ldarg.1] --(74 add)--> state 234 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> add] {233, 236 }, // cell# 234 : state 233 [ldarg.0 -> dup -> ldfld -> ldarg.1] --(75 sub)--> state 236 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> sub] {233, 238 }, // cell# 235 : state 233 [ldarg.0 -> dup -> ldfld -> ldarg.1] --(76 mul)--> state 238 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> mul] {233, 240 }, // cell# 236 : state 233 [ldarg.0 -> dup -> ldfld -> ldarg.1] --(77 div)--> state 240 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> div] {234, 235 }, // cell# 237 : state 234 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> add] --(109 stfld)--> state 235 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> add -> stfld] {236, 237 }, // cell# 238 : state 236 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> sub] --(109 stfld)--> state 237 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> sub -> stfld] {238, 239 }, // cell# 239 : state 238 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> mul] --(109 stfld)--> state 239 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> mul -> stfld] {240, 241 }, // cell# 240 : state 240 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> div] --(109 stfld)--> state 241 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> div -> stfld] {242, 243 }, // cell# 241 : state 242 [ldarg.0 -> ldfld -> ldarg.1] --(107 ldfld)--> state 243 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld] {243, 244 }, // cell# 242 : state 243 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld] --(74 add)--> state 244 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld -> add] {243, 245 }, // cell# 243 : state 243 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld] --(75 sub)--> state 245 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld -> sub] {246, 247 }, // cell# 244 : state 246 [ldarga.s -> ldfld -> ldarga.s] --(107 ldfld)--> state 247 [ldarga.s -> ldfld -> ldarga.s -> ldfld] {247, 248 }, // cell# 245 : state 247 [ldarga.s -> ldfld -> ldarga.s -> ldfld] --(74 add)--> state 248 [ldarga.s -> ldfld -> ldarga.s -> ldfld -> add] {247, 249 }, // cell# 246 : state 247 [ldarga.s -> ldfld -> ldarga.s -> ldfld] --(75 sub)--> state 249 [ldarga.s -> ldfld -> ldarga.s -> ldfld -> sub] { 0, 0 }, // cell# 247 { 0, 0 }, // cell# 248 { 0, 0 }, // cell# 249 { 0, 0 }, // cell# 250 { 0, 0 }, // cell# 251 { 0, 0 }, // cell# 252 { 0, 0 }, // cell# 253 { 0, 0 }, // cell# 254 { 0, 0 }, // cell# 255 { 0, 0 }, // cell# 256 { 0, 0 }, // cell# 257 { 0, 0 }, // cell# 258 { 0, 0 }, // cell# 259 { 0, 0 }, // cell# 260 { 0, 0 }, // cell# 261 { 0, 0 }, // cell# 262 { 0, 0 }, // cell# 263 { 0, 0 }, // cell# 264 { 0, 0 }, // cell# 265 { 0, 0 }, // cell# 266 { 0, 0 }, // cell# 267 { 0, 0 }, // cell# 268 { 0, 0 }, // cell# 269 { 0, 0 }, // cell# 270 { 0, 0 }, // cell# 271 { 0, 0 }, // cell# 272 { 0, 0 }, // cell# 273 { 0, 0 }, // cell# 274 { 0, 0 }, // cell# 275 { 0, 0 }, // cell# 276 { 0, 0 }, // cell# 277 { 0, 0 }, // cell# 278 { 0, 0 }, // cell# 279 { 0, 0 }, // cell# 280 { 0, 0 }, // cell# 281 { 0, 0 }, // cell# 282 { 0, 0 }, // cell# 283 { 0, 0 }, // cell# 284 { 0, 0 }, // cell# 285 { 0, 0 }, // cell# 286 { 0, 0 }, // cell# 287 { 0, 0 }, // cell# 288 { 0, 0 }, // cell# 289 { 0, 0 }, // cell# 290 { 0, 0 }, // cell# 291 { 0, 0 }, // cell# 292 { 3, 191 }, // cell# 293 : state 3 [ldarg.0] --(107 ldfld)--> state 191 [ldarg.0 -> ldfld] { 
0, 0 }, // cell# 294 { 0, 0 }, // cell# 295 { 0, 0 }, // cell# 296 { 0, 0 }, // cell# 297 { 0, 0 }, // cell# 298 { 0, 0 }, // cell# 299 { 0, 0 }, // cell# 300 { 0, 0 }, // cell# 301 { 0, 0 }, // cell# 302 { 0, 0 }, // cell# 303 { 0, 0 }, // cell# 304 { 0, 0 }, // cell# 305 { 0, 0 }, // cell# 306 { 0, 0 }, // cell# 307 { 0, 0 }, // cell# 308 { 0, 0 }, // cell# 309 { 0, 0 }, // cell# 310 { 0, 0 }, // cell# 311 { 0, 0 }, // cell# 312 { 0, 0 }, // cell# 313 { 0, 0 }, // cell# 314 { 0, 0 }, // cell# 315 { 0, 0 }, // cell# 316 { 0, 0 }, // cell# 317 { 0, 0 }, // cell# 318 { 0, 0 }, // cell# 319 { 0, 0 }, // cell# 320 { 0, 0 }, // cell# 321 { 0, 0 }, // cell# 322 { 0, 0 }, // cell# 323 { 0, 0 }, // cell# 324 { 0, 0 }, // cell# 325 { 0, 0 }, // cell# 326 { 0, 0 }, // cell# 327 { 0, 0 }, // cell# 328 { 0, 0 }, // cell# 329 { 0, 0 }, // cell# 330 { 0, 0 }, // cell# 331 { 0, 0 }, // cell# 332 { 0, 0 }, // cell# 333 { 0, 0 }, // cell# 334 { 0, 0 }, // cell# 335 { 0, 0 }, // cell# 336 { 0, 0 }, // cell# 337 { 0, 0 }, // cell# 338 { 0, 0 }, // cell# 339 { 0, 0 }, // cell# 340 { 0, 0 }, // cell# 341 { 0, 0 }, // cell# 342 { 0, 0 }, // cell# 343 { 0, 0 }, // cell# 344 { 0, 0 }, // cell# 345 { 0, 0 }, // cell# 346 { 0, 0 }, // cell# 347 { 0, 0 }, // cell# 348 { 0, 0 }, // cell# 349 { 0, 0 }, // cell# 350 { 0, 0 }, // cell# 351 { 0, 0 }, // cell# 352 { 0, 0 }, // cell# 353 { 0, 0 }, // cell# 354 { 0, 0 }, // cell# 355 { 0, 0 }, // cell# 356 { 0, 0 }, // cell# 357 { 0, 0 }, // cell# 358 { 0, 0 }, // cell# 359 { 0, 0 }, // cell# 360 { 0, 0 }, // cell# 361 { 0, 0 }, // cell# 362 { 0, 0 }, // cell# 363 { 0, 0 }, // cell# 364 { 0, 0 }, // cell# 365 { 0, 0 }, // cell# 366 { 0, 0 }, // cell# 367 { 0, 0 }, // cell# 368 { 0, 0 }, // cell# 369 { 0, 0 }, // cell# 370 { 0, 0 }, // cell# 371 { 0, 0 }, // cell# 372 { 0, 0 }, // cell# 373 { 0, 0 }, // cell# 374 { 0, 0 }, // cell# 375 { 0, 0 }, // cell# 376 { 0, 0 }, // cell# 377 { 0, 0 }, // cell# 378 { 0, 0 }, // cell# 379 { 0, 0 }, // cell# 380 { 0, 0 }, // cell# 381 { 0, 0 }, // cell# 382 { 0, 0 }, // cell# 383 { 0, 0 }, // cell# 384 { 0, 0 }, // cell# 385 { 0, 0 }, // cell# 386 { 0, 0 }, // cell# 387 { 0, 0 }, // cell# 388 { 0, 0 }, // cell# 389 { 0, 0 }, // cell# 390 { 0, 0 }, // cell# 391 { 0, 0 }, // cell# 392 { 0, 0 }, // cell# 393 { 0, 0 }, // cell# 394 { 0, 0 }, // cell# 395 { 0, 0 }, // cell# 396 { 0, 0 }, // cell# 397 { 0, 0 }, // cell# 398 { 0, 0 }, // cell# 399 { 0, 0 }, // cell# 400 { 0, 0 }, // cell# 401 { 0, 0 }, // cell# 402 { 0, 0 }, // cell# 403 { 0, 0 }, // cell# 404 { 0, 0 }, // cell# 405 { 0, 0 }, // cell# 406 { 0, 0 }, // cell# 407 { 0, 0 }, // cell# 408 { 0, 0 }, // cell# 409 { 0, 0 }, // cell# 410 { 0, 0 }, // cell# 411 { 0, 0 }, // cell# 412 { 0, 0 }, // cell# 413 { 0, 0 }, // cell# 414 { 0, 0 }, // cell# 415 { 0, 0 }, // cell# 416 { 0, 0 }, // cell# 417 }; // clang-format on const JumpTableCell* gp_SMJumpTableCells = g_SMJumpTableCells;
-1
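The two generated tables in the record above encode an IL-opcode recognizer: each SMState row describes a prefix of opcodes already matched (rows whose first field is 1 are terminal, i.e. a fully recognized sequence), and each JumpTableCell row is one transition that is valid only when its source state matches the state currently being walked. The sketch below shows how such a pair of tables could be consulted; the struct layout, field names, and the cell-indexing rule are simplified assumptions for illustration, not the JIT's actual code.

```cpp
// Illustrative sketch only: simplified stand-ins for the state/jump-table
// pair dumped above. Field names and the indexing rule are assumptions.
#include <cstdint>
#include <vector>

using StateId = uint16_t;
using Opcode  = uint16_t;

struct Cell  { StateId src; StateId dest; };        // one (state --opcode--> state) edge
struct State { bool terminal; uint32_t cellBase; }; // cellBase: index of this state's first cell

// Advance the recognizer by one opcode. A cell is trusted only if its 'src'
// field names the current state; otherwise the sequence is not continued and
// the walk restarts from the start state (state 1 in the dump above).
StateId Advance(const std::vector<State>& states,
                const std::vector<Cell>&  cells,
                StateId current, Opcode op)
{
    const size_t idx = static_cast<size_t>(states[current].cellBase) + op;
    if (idx < cells.size() && cells[idx].src == current)
        return cells[idx].dest;
    return 1; // fall back to the start state and try to match a new sequence
}
```

Keeping the source state inside every cell is what lets many states share one densely packed cell array: a cell reached through the wrong state's base offset simply fails the `src` check and the walk resets.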
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/dlls/mscoree/mscoree.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // MSCoree.cpp //***************************************************************************** #include "stdafx.h" // Standard header. #include <utilcode.h> // Utility helpers. #include <posterror.h> // Error handlers #define INIT_GUIDS #include <corpriv.h> #include <winwrap.h> #include <mscoree.h> #include "shimload.h" #include "metadataexports.h" #include "ex.h" #include <dbgenginemetrics.h> #if !defined(CORECLR_EMBEDDED) BOOL STDMETHODCALLTYPE EEDllMain( // TRUE on success, FALSE on error. HINSTANCE hInst, // Instance handle of the loaded module. DWORD dwReason, // Reason for loading. LPVOID lpReserved); // Unused. //***************************************************************************** // Handle lifetime of loaded library. //***************************************************************************** #include <shlwapi.h> #ifdef TARGET_WINDOWS extern "C" BOOL WINAPI DllMain(HANDLE hInstance, DWORD dwReason, LPVOID lpReserved); #endif // TARGET_WINDOWS extern "C" #ifdef TARGET_UNIX DLLEXPORT // For Win32 PAL LoadLibrary emulation #endif BOOL WINAPI DllMain(HANDLE hInstance, DWORD dwReason, LPVOID lpReserved) { STATIC_CONTRACT_NOTHROW; return EEDllMain((HINSTANCE)hInstance, dwReason, lpReserved); } #endif // !defined(CORECLR_EMBEDDED) extern void* GetClrModuleBase(); // --------------------------------------------------------------------------- // %%Function: MetaDataGetDispenser // This function gets the Dispenser interface given the CLSID and REFIID. // --------------------------------------------------------------------------- STDAPI DLLEXPORT MetaDataGetDispenser( // Return HRESULT REFCLSID rclsid, // The class to desired. REFIID riid, // Interface wanted on class factory. LPVOID FAR *ppv) // Return interface pointer here. { CONTRACTL { NOTHROW; GC_NOTRIGGER; ENTRY_POINT; PRECONDITION(CheckPointer(ppv)); } CONTRACTL_END; NonVMComHolder<IClassFactory> pcf(NULL); HRESULT hr; BEGIN_ENTRYPOINT_NOTHROW; IfFailGo(MetaDataDllGetClassObject(rclsid, IID_IClassFactory, (void **) &pcf)); hr = pcf->CreateInstance(NULL, riid, ppv); ErrExit: END_ENTRYPOINT_NOTHROW; return (hr); } // --------------------------------------------------------------------------- // %%Function: GetMetaDataInternalInterface // This function gets the IMDInternalImport given the metadata on memory. // --------------------------------------------------------------------------- STDAPI DLLEXPORT GetMetaDataInternalInterface( LPVOID pData, // [IN] in memory metadata section ULONG cbData, // [IN] size of the metadata section DWORD flags, // [IN] MDInternal_OpenForRead or MDInternal_OpenForENC REFIID riid, // [IN] desired interface void **ppv) // [OUT] returned interface { CONTRACTL{ NOTHROW; GC_NOTRIGGER; ENTRY_POINT; PRECONDITION(CheckPointer(pData)); PRECONDITION(CheckPointer(ppv)); } CONTRACTL_END; HRESULT hr = S_OK; BEGIN_ENTRYPOINT_NOTHROW; hr = GetMDInternalInterface(pData, cbData, flags, riid, ppv); END_ENTRYPOINT_NOTHROW; return hr; } // --------------------------------------------------------------------------- // %%Function: GetMetaDataInternalInterfaceFromPublic // This function gets the internal scopeless interface given the public // scopeless interface. 
// --------------------------------------------------------------------------- STDAPI DLLEXPORT GetMetaDataInternalInterfaceFromPublic( IUnknown *pv, // [IN] Given interface. REFIID riid, // [IN] desired interface void **ppv) // [OUT] returned interface { CONTRACTL{ NOTHROW; GC_NOTRIGGER; ENTRY_POINT; PRECONDITION(CheckPointer(pv)); PRECONDITION(CheckPointer(ppv)); } CONTRACTL_END; HRESULT hr = S_OK; BEGIN_ENTRYPOINT_NOTHROW; hr = GetMDInternalInterfaceFromPublic(pv, riid, ppv); END_ENTRYPOINT_NOTHROW; return hr; } // --------------------------------------------------------------------------- // %%Function: GetMetaDataPublicInterfaceFromInternal // This function gets the public scopeless interface given the internal // scopeless interface. // --------------------------------------------------------------------------- STDAPI DLLEXPORT GetMetaDataPublicInterfaceFromInternal( void *pv, // [IN] Given interface. REFIID riid, // [IN] desired interface. void **ppv) // [OUT] returned interface { CONTRACTL{ NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pv)); PRECONDITION(CheckPointer(ppv)); ENTRY_POINT; } CONTRACTL_END; HRESULT hr = S_OK; BEGIN_ENTRYPOINT_NOTHROW; hr = GetMDPublicInterfaceFromInternal(pv, riid, ppv); END_ENTRYPOINT_NOTHROW; return hr; } // --------------------------------------------------------------------------- // %%Function: ReopenMetaDataWithMemory // This function gets the public scopeless interface given the internal // scopeless interface. // --------------------------------------------------------------------------- STDAPI ReOpenMetaDataWithMemory( void *pUnk, // [IN] Given scope. public interfaces LPCVOID pData, // [in] Location of scope data. ULONG cbData) // [in] Size of the data pointed to by pData. { CONTRACTL{ NOTHROW; GC_NOTRIGGER; ENTRY_POINT; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(pData)); } CONTRACTL_END; HRESULT hr = S_OK; BEGIN_ENTRYPOINT_NOTHROW; hr = MDReOpenMetaDataWithMemory(pUnk, pData, cbData); END_ENTRYPOINT_NOTHROW; return hr; } // --------------------------------------------------------------------------- // %%Function: ReopenMetaDataWithMemoryEx // This function gets the public scopeless interface given the internal // scopeless interface. // --------------------------------------------------------------------------- STDAPI ReOpenMetaDataWithMemoryEx( void *pUnk, // [IN] Given scope. public interfaces LPCVOID pData, // [in] Location of scope data. ULONG cbData, // [in] Size of the data pointed to by pData. DWORD dwReOpenFlags) // [in] ReOpen flags { CONTRACTL{ NOTHROW; GC_NOTRIGGER; ENTRY_POINT; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(pData)); } CONTRACTL_END; HRESULT hr = S_OK; BEGIN_ENTRYPOINT_NOTHROW; hr = MDReOpenMetaDataWithMemoryEx(pUnk, pData, cbData, dwReOpenFlags); END_ENTRYPOINT_NOTHROW; return hr; } static DWORD g_dwSystemDirectory = 0; static WCHAR * g_pSystemDirectory = NULL; HRESULT GetInternalSystemDirectory(_Out_writes_to_opt_(*pdwLength,*pdwLength) LPWSTR buffer, __inout DWORD* pdwLength) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(buffer, NULL_OK)); PRECONDITION(CheckPointer(pdwLength)); } CONTRACTL_END; if (g_dwSystemDirectory == 0) SetInternalSystemDirectory(); // // g_dwSystemDirectory includes the NULL in its count! 
// if(*pdwLength < g_dwSystemDirectory) { *pdwLength = g_dwSystemDirectory; return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER); } if (buffer != NULL) { // // wcsncpy_s will automatically append a null and g_dwSystemDirectory // includes the null in its count, so we have to subtract 1. // wcsncpy_s(buffer, *pdwLength, g_pSystemDirectory, g_dwSystemDirectory-1); } *pdwLength = g_dwSystemDirectory; return S_OK; } LPCWSTR GetInternalSystemDirectory(_Out_ DWORD* pdwLength) { LIMITED_METHOD_CONTRACT; if (g_dwSystemDirectory == 0) { SetInternalSystemDirectory(); } if (pdwLength != NULL) { * pdwLength = g_dwSystemDirectory; } return g_pSystemDirectory; } HRESULT SetInternalSystemDirectory() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; HRESULT hr = S_OK; if(g_dwSystemDirectory == 0) { DWORD len = 0; NewArrayHolder<WCHAR> pSystemDirectory; EX_TRY{ // use local buffer for thread safety PathString wzSystemDirectory; hr = GetClrModuleDirectory(wzSystemDirectory); if (FAILED(hr)) { wzSystemDirectory.Set(W('\0')); } pSystemDirectory = wzSystemDirectory.GetCopyOfUnicodeString(); if (pSystemDirectory == NULL) { hr = HRESULT_FROM_WIN32(ERROR_NOT_ENOUGH_MEMORY); } len = wzSystemDirectory.GetCount() + 1; } EX_CATCH_HRESULT(hr); // publish results idempotently with correct memory ordering g_pSystemDirectory = pSystemDirectory.Extract(); (void)InterlockedExchange((LONG *)&g_dwSystemDirectory, len); } return hr; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // MSCoree.cpp //***************************************************************************** #include "stdafx.h" // Standard header. #include <utilcode.h> // Utility helpers. #include <posterror.h> // Error handlers #define INIT_GUIDS #include <corpriv.h> #include <winwrap.h> #include <mscoree.h> #include "shimload.h" #include "metadataexports.h" #include "ex.h" #include <dbgenginemetrics.h> #if !defined(CORECLR_EMBEDDED) BOOL STDMETHODCALLTYPE EEDllMain( // TRUE on success, FALSE on error. HINSTANCE hInst, // Instance handle of the loaded module. DWORD dwReason, // Reason for loading. LPVOID lpReserved); // Unused. //***************************************************************************** // Handle lifetime of loaded library. //***************************************************************************** #include <shlwapi.h> #ifdef TARGET_WINDOWS extern "C" BOOL WINAPI DllMain(HANDLE hInstance, DWORD dwReason, LPVOID lpReserved); #endif // TARGET_WINDOWS extern "C" #ifdef TARGET_UNIX DLLEXPORT // For Win32 PAL LoadLibrary emulation #endif BOOL WINAPI DllMain(HANDLE hInstance, DWORD dwReason, LPVOID lpReserved) { STATIC_CONTRACT_NOTHROW; return EEDllMain((HINSTANCE)hInstance, dwReason, lpReserved); } #endif // !defined(CORECLR_EMBEDDED) extern void* GetClrModuleBase(); // --------------------------------------------------------------------------- // %%Function: MetaDataGetDispenser // This function gets the Dispenser interface given the CLSID and REFIID. // --------------------------------------------------------------------------- STDAPI DLLEXPORT MetaDataGetDispenser( // Return HRESULT REFCLSID rclsid, // The class to desired. REFIID riid, // Interface wanted on class factory. LPVOID FAR *ppv) // Return interface pointer here. { CONTRACTL { NOTHROW; GC_NOTRIGGER; ENTRY_POINT; PRECONDITION(CheckPointer(ppv)); } CONTRACTL_END; NonVMComHolder<IClassFactory> pcf(NULL); HRESULT hr; BEGIN_ENTRYPOINT_NOTHROW; IfFailGo(MetaDataDllGetClassObject(rclsid, IID_IClassFactory, (void **) &pcf)); hr = pcf->CreateInstance(NULL, riid, ppv); ErrExit: END_ENTRYPOINT_NOTHROW; return (hr); } // --------------------------------------------------------------------------- // %%Function: GetMetaDataInternalInterface // This function gets the IMDInternalImport given the metadata on memory. // --------------------------------------------------------------------------- STDAPI DLLEXPORT GetMetaDataInternalInterface( LPVOID pData, // [IN] in memory metadata section ULONG cbData, // [IN] size of the metadata section DWORD flags, // [IN] MDInternal_OpenForRead or MDInternal_OpenForENC REFIID riid, // [IN] desired interface void **ppv) // [OUT] returned interface { CONTRACTL{ NOTHROW; GC_NOTRIGGER; ENTRY_POINT; PRECONDITION(CheckPointer(pData)); PRECONDITION(CheckPointer(ppv)); } CONTRACTL_END; HRESULT hr = S_OK; BEGIN_ENTRYPOINT_NOTHROW; hr = GetMDInternalInterface(pData, cbData, flags, riid, ppv); END_ENTRYPOINT_NOTHROW; return hr; } // --------------------------------------------------------------------------- // %%Function: GetMetaDataInternalInterfaceFromPublic // This function gets the internal scopeless interface given the public // scopeless interface. 
// --------------------------------------------------------------------------- STDAPI DLLEXPORT GetMetaDataInternalInterfaceFromPublic( IUnknown *pv, // [IN] Given interface. REFIID riid, // [IN] desired interface void **ppv) // [OUT] returned interface { CONTRACTL{ NOTHROW; GC_NOTRIGGER; ENTRY_POINT; PRECONDITION(CheckPointer(pv)); PRECONDITION(CheckPointer(ppv)); } CONTRACTL_END; HRESULT hr = S_OK; BEGIN_ENTRYPOINT_NOTHROW; hr = GetMDInternalInterfaceFromPublic(pv, riid, ppv); END_ENTRYPOINT_NOTHROW; return hr; } // --------------------------------------------------------------------------- // %%Function: GetMetaDataPublicInterfaceFromInternal // This function gets the public scopeless interface given the internal // scopeless interface. // --------------------------------------------------------------------------- STDAPI DLLEXPORT GetMetaDataPublicInterfaceFromInternal( void *pv, // [IN] Given interface. REFIID riid, // [IN] desired interface. void **ppv) // [OUT] returned interface { CONTRACTL{ NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(pv)); PRECONDITION(CheckPointer(ppv)); ENTRY_POINT; } CONTRACTL_END; HRESULT hr = S_OK; BEGIN_ENTRYPOINT_NOTHROW; hr = GetMDPublicInterfaceFromInternal(pv, riid, ppv); END_ENTRYPOINT_NOTHROW; return hr; } // --------------------------------------------------------------------------- // %%Function: ReopenMetaDataWithMemory // This function gets the public scopeless interface given the internal // scopeless interface. // --------------------------------------------------------------------------- STDAPI ReOpenMetaDataWithMemory( void *pUnk, // [IN] Given scope. public interfaces LPCVOID pData, // [in] Location of scope data. ULONG cbData) // [in] Size of the data pointed to by pData. { CONTRACTL{ NOTHROW; GC_NOTRIGGER; ENTRY_POINT; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(pData)); } CONTRACTL_END; HRESULT hr = S_OK; BEGIN_ENTRYPOINT_NOTHROW; hr = MDReOpenMetaDataWithMemory(pUnk, pData, cbData); END_ENTRYPOINT_NOTHROW; return hr; } // --------------------------------------------------------------------------- // %%Function: ReopenMetaDataWithMemoryEx // This function gets the public scopeless interface given the internal // scopeless interface. // --------------------------------------------------------------------------- STDAPI ReOpenMetaDataWithMemoryEx( void *pUnk, // [IN] Given scope. public interfaces LPCVOID pData, // [in] Location of scope data. ULONG cbData, // [in] Size of the data pointed to by pData. DWORD dwReOpenFlags) // [in] ReOpen flags { CONTRACTL{ NOTHROW; GC_NOTRIGGER; ENTRY_POINT; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(pData)); } CONTRACTL_END; HRESULT hr = S_OK; BEGIN_ENTRYPOINT_NOTHROW; hr = MDReOpenMetaDataWithMemoryEx(pUnk, pData, cbData, dwReOpenFlags); END_ENTRYPOINT_NOTHROW; return hr; } static DWORD g_dwSystemDirectory = 0; static WCHAR * g_pSystemDirectory = NULL; HRESULT GetInternalSystemDirectory(_Out_writes_to_opt_(*pdwLength,*pdwLength) LPWSTR buffer, __inout DWORD* pdwLength) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(buffer, NULL_OK)); PRECONDITION(CheckPointer(pdwLength)); } CONTRACTL_END; if (g_dwSystemDirectory == 0) SetInternalSystemDirectory(); // // g_dwSystemDirectory includes the NULL in its count! 
// if(*pdwLength < g_dwSystemDirectory) { *pdwLength = g_dwSystemDirectory; return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER); } if (buffer != NULL) { // // wcsncpy_s will automatically append a null and g_dwSystemDirectory // includes the null in its count, so we have to subtract 1. // wcsncpy_s(buffer, *pdwLength, g_pSystemDirectory, g_dwSystemDirectory-1); } *pdwLength = g_dwSystemDirectory; return S_OK; } LPCWSTR GetInternalSystemDirectory(_Out_ DWORD* pdwLength) { LIMITED_METHOD_CONTRACT; if (g_dwSystemDirectory == 0) { SetInternalSystemDirectory(); } if (pdwLength != NULL) { * pdwLength = g_dwSystemDirectory; } return g_pSystemDirectory; } HRESULT SetInternalSystemDirectory() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; HRESULT hr = S_OK; if(g_dwSystemDirectory == 0) { DWORD len = 0; NewArrayHolder<WCHAR> pSystemDirectory; EX_TRY{ // use local buffer for thread safety PathString wzSystemDirectory; hr = GetClrModuleDirectory(wzSystemDirectory); if (FAILED(hr)) { wzSystemDirectory.Set(W('\0')); } pSystemDirectory = wzSystemDirectory.GetCopyOfUnicodeString(); if (pSystemDirectory == NULL) { hr = HRESULT_FROM_WIN32(ERROR_NOT_ENOUGH_MEMORY); } len = wzSystemDirectory.GetCount() + 1; } EX_CATCH_HRESULT(hr); // publish results idempotently with correct memory ordering g_pSystemDirectory = pSystemDirectory.Extract(); (void)InterlockedExchange((LONG *)&g_dwSystemDirectory, len); } return hr; }
-1
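The mscoree.cpp content above caches the CLR system directory once and then publishes it, per its comment about publishing results idempotently with correct memory ordering. Below is a generic sketch of that publish-once idea using a compare-and-swap on an atomic pointer; it is not the runtime's implementation, and the function name, globals, and placeholder path are hypothetical.

```cpp
// Illustrative sketch only: a generic compute-once/publish-once cache.
// Not the runtime's code; names and the placeholder value are hypothetical.
#include <atomic>
#include <string>

static std::atomic<const std::string*> g_cachedDir{nullptr};

const std::string& GetDirOnce()
{
    const std::string* cur = g_cachedDir.load(std::memory_order_acquire);
    if (cur == nullptr)
    {
        // Compute a candidate value; losers of the race discard theirs.
        const std::string* fresh = new std::string("/hypothetical/clr/dir");
        if (g_cachedDir.compare_exchange_strong(cur, fresh,
                                                std::memory_order_release,
                                                std::memory_order_acquire))
            cur = fresh;  // this thread published the value
        else
            delete fresh; // another thread published first; use its value
    }
    return *cur;
}
```

The release/acquire pairing plays the same role as the interlocked store in the original: the payload is fully written before any reader can observe the published value.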
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/vm/tieredcompilation.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // =========================================================================== // File: TieredCompilation.CPP // // =========================================================================== #include "common.h" #include "excep.h" #include "log.h" #include "win32threadpool.h" #include "threadsuspend.h" #include "tieredcompilation.h" // TieredCompilationManager determines which methods should be recompiled and // how they should be recompiled to best optimize the running code. It then // handles logistics of getting new code created and installed. // // // # Important entrypoints in this code: // // // a) .ctor - called once during AppDomain initialization // b) HandleCallCountingForFirstCall(...) - called when a method's code version is being // invoked for the first time. // // # Overall workflow // // Methods initially call into HandleCallCountingForFirstCall() and once the call count exceeds // a fixed limit we queue work on to our internal list of methods needing to // be recompiled (m_methodsToOptimize). If there is currently no thread // servicing our queue asynchronously then we use the runtime threadpool // QueueUserWorkItem to recruit one. During the callback for each threadpool work // item we handle as many methods as possible in a fixed period of time, then // queue another threadpool work item if m_methodsToOptimize hasn't been drained. // // The background thread enters at StaticBackgroundWorkCallback(), enters the // appdomain, and then begins calling OptimizeMethod on each method in the // queue. For each method we jit it, then update the precode so that future // entrypoint callers will run the new code. // // # Error handling // // The overall principle is don't swallow terminal failures that may have corrupted the // process (AV for example), but otherwise for any transient issue or functional limitation // that prevents us from optimizing log it for diagnostics and then back out gracefully, // continuing to run the less optimal code. The feature should be constructed so that // errors are limited to OS resource exhaustion or poorly behaved managed code // (for example within an AssemblyResolve event or static constructor triggered by the JIT). 
#if defined(FEATURE_TIERED_COMPILATION) && !defined(DACCESS_COMPILE) CrstStatic TieredCompilationManager::s_lock; #ifdef _DEBUG Thread *TieredCompilationManager::s_backgroundWorkerThread = nullptr; #endif CLREvent TieredCompilationManager::s_backgroundWorkAvailableEvent; bool TieredCompilationManager::s_isBackgroundWorkerRunning = false; bool TieredCompilationManager::s_isBackgroundWorkerProcessingWork = false; // Called at AppDomain construction TieredCompilationManager::TieredCompilationManager() : m_countOfMethodsToOptimize(0), m_countOfNewMethodsCalledDuringDelay(0), m_methodsPendingCountingForTier1(nullptr), m_tier1CallCountingCandidateMethodRecentlyRecorded(false), m_isPendingCallCountingCompletion(false), m_recentlyRequestedCallCountingCompletion(false) { WRAPPER_NO_CONTRACT; // On Unix, we can reach here before EEConfig is initialized, so defer config-based initialization to Init() } // Called at AppDomain Init void TieredCompilationManager::Init() { CONTRACTL { GC_NOTRIGGER; CAN_TAKE_LOCK; MODE_PREEMPTIVE; } CONTRACTL_END; } #endif // FEATURE_TIERED_COMPILATION && !DACCESS_COMPILE NativeCodeVersion::OptimizationTier TieredCompilationManager::GetInitialOptimizationTier(PTR_MethodDesc pMethodDesc) { WRAPPER_NO_CONTRACT; _ASSERTE(pMethodDesc != NULL); #ifdef FEATURE_TIERED_COMPILATION if (!pMethodDesc->IsEligibleForTieredCompilation()) { // The optimization tier is not used return NativeCodeVersion::OptimizationTierOptimized; } if (pMethodDesc->RequestedAggressiveOptimization()) { // Methods flagged with MethodImplOptions.AggressiveOptimization start with and stay at tier 1 return NativeCodeVersion::OptimizationTier1; } if (!pMethodDesc->GetLoaderAllocator()->GetCallCountingManager()->IsCallCountingEnabled(NativeCodeVersion(pMethodDesc))) { // Tier 0 call counting may have been disabled for several reasons, the intention is to start with and stay at an // optimized tier return NativeCodeVersion::OptimizationTierOptimized; } return NativeCodeVersion::OptimizationTier0; #else return NativeCodeVersion::OptimizationTierOptimized; #endif } #if defined(FEATURE_TIERED_COMPILATION) && !defined(DACCESS_COMPILE) void TieredCompilationManager::HandleCallCountingForFirstCall(MethodDesc* pMethodDesc) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; _ASSERTE(pMethodDesc != nullptr); _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation()); _ASSERTE(g_pConfig->TieredCompilation_CallCountingDelayMs() != 0); // An exception here (OOM) would mean that the method's calls would not be counted and it would not be promoted. A // consideration is that an attempt can be made to reset the code entry point on exception (which can also OOM). Doesn't // seem worth it, the exception is propagated and there are other cases where a method may not be promoted due to OOM. 
bool createBackgroundWorker; { LockHolder tieredCompilationLockHolder; SArray<MethodDesc *> *methodsPendingCounting = m_methodsPendingCountingForTier1; _ASSERTE((methodsPendingCounting != nullptr) == IsTieringDelayActive()); if (methodsPendingCounting != nullptr) { methodsPendingCounting->Append(pMethodDesc); ++m_countOfNewMethodsCalledDuringDelay; if (!m_tier1CallCountingCandidateMethodRecentlyRecorded) { // Delay call counting for currently recoded methods further m_tier1CallCountingCandidateMethodRecentlyRecorded = true; } return; } NewHolder<SArray<MethodDesc *>> methodsPendingCountingHolder = new SArray<MethodDesc *>(); methodsPendingCountingHolder->Preallocate(64); methodsPendingCountingHolder->Append(pMethodDesc); ++m_countOfNewMethodsCalledDuringDelay; m_methodsPendingCountingForTier1 = methodsPendingCountingHolder.Extract(); _ASSERTE(!m_tier1CallCountingCandidateMethodRecentlyRecorded); _ASSERTE(IsTieringDelayActive()); // The thread is in a GC_NOTRIGGER scope here. If the background worker is already running, we can schedule it inside // the same lock without triggering a GC. createBackgroundWorker = !TryScheduleBackgroundWorkerWithoutGCTrigger_Locked(); } if (createBackgroundWorker) { // Elsewhere, the tiered compilation lock is taken inside the code versioning lock. The code versioning lock is an // unsafe any-GC-mode lock, so the tiering lock is also that type of lock. Inside that type of lock, there is an // implicit GC_NOTRIGGER contract. So, a thread cannot be created inside the tiering lock since it may GC_TRIGGERS. At // this point, this is the only thread that may attempt creating the background worker thread. EX_TRY { CreateBackgroundWorker(); } EX_CATCH { // Since the tiering lock was released and reacquired, other methods may have been recorded in-between. Just // deactivate the tiering delay. Any methods that have been recorded would not have their calls be counted and // would not be promoted (due to the small window, there shouldn't be many of those). See consideration above in a // similar exception case. { LockHolder tieredCompilationLockHolder; _ASSERTE(IsTieringDelayActive()); m_tier1CallCountingCandidateMethodRecentlyRecorded = false; _ASSERTE(m_methodsPendingCountingForTier1 != nullptr); delete m_methodsPendingCountingForTier1; m_methodsPendingCountingForTier1 = nullptr; _ASSERTE(!IsTieringDelayActive()); } EX_RETHROW; } EX_END_CATCH(RethrowTerminalExceptions); } if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled()) { ETW::CompilationLog::TieredCompilation::Runtime::SendPause(); } } bool TieredCompilationManager::TrySetCodeEntryPointAndRecordMethodForCallCounting(MethodDesc* pMethodDesc, PCODE codeEntryPoint) { WRAPPER_NO_CONTRACT; _ASSERTE(pMethodDesc != nullptr); _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation()); _ASSERTE(codeEntryPoint != NULL); if (!IsTieringDelayActive()) { return false; } LockHolder tieredCompilationLockHolder; if (!IsTieringDelayActive()) { return false; } // Set the code entry point before recording the method for call counting to avoid a race. Otherwise, the tiering delay may // expire and enable call counting for the method before the entry point is set here, in which case calls to the method // would not be counted anymore. 
pMethodDesc->SetCodeEntryPoint(codeEntryPoint); _ASSERTE(m_methodsPendingCountingForTier1 != nullptr); m_methodsPendingCountingForTier1->Append(pMethodDesc); return true; } void TieredCompilationManager::AsyncPromoteToTier1( NativeCodeVersion tier0NativeCodeVersion, bool *createTieringBackgroundWorkerRef) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(CodeVersionManager::IsLockOwnedByCurrentThread()); _ASSERTE(!tier0NativeCodeVersion.IsNull()); _ASSERTE(tier0NativeCodeVersion.GetOptimizationTier() == NativeCodeVersion::OptimizationTier0); _ASSERTE(createTieringBackgroundWorkerRef != nullptr); NativeCodeVersion t1NativeCodeVersion; HRESULT hr; // Add an inactive native code entry in the versioning table to track the tier1 // compilation we are going to create. This entry binds the compilation to a // particular version of the IL code regardless of any changes that may // occur between now and when jitting completes. If the IL does change in that // interval the new code entry won't be activated. MethodDesc *pMethodDesc = tier0NativeCodeVersion.GetMethodDesc(); ILCodeVersion ilCodeVersion = tier0NativeCodeVersion.GetILCodeVersion(); _ASSERTE(!ilCodeVersion.HasAnyOptimizedNativeCodeVersion(tier0NativeCodeVersion)); hr = ilCodeVersion.AddNativeCodeVersion(pMethodDesc, NativeCodeVersion::OptimizationTier1, &t1NativeCodeVersion); if (FAILED(hr)) { ThrowHR(hr); } // Insert the method into the optimization queue and trigger a thread to service // the queue if needed. SListElem<NativeCodeVersion>* pMethodListItem = new SListElem<NativeCodeVersion>(t1NativeCodeVersion); { LockHolder tieredCompilationLockHolder; m_methodsToOptimize.InsertTail(pMethodListItem); ++m_countOfMethodsToOptimize; LOG((LF_TIEREDCOMPILATION, LL_INFO10000, "TieredCompilationManager::AsyncPromoteToTier1 Method=0x%pM (%s::%s), code version id=0x%x queued\n", pMethodDesc, pMethodDesc->m_pszDebugClassName, pMethodDesc->m_pszDebugMethodName, t1NativeCodeVersion.GetVersionId())); // The thread is in a GC_NOTRIGGER scope here. If the background worker is already running, we can schedule it inside // the same lock without triggering a GC. if (TryScheduleBackgroundWorkerWithoutGCTrigger_Locked()) { return; } } // This function is called from a GC_NOTRIGGER scope and creating the background worker (creating a thread) may GC_TRIGGERS. // The caller needs to create the background worker after leaving the GC_NOTRIGGER scope. The contract is that the caller // must make an attempt to create the background worker in any normal path. In the event of an atypical exception (eg. OOM), // the background worker may not be created and would have to be tried again the next time some background work is queued. 
*createTieringBackgroundWorkerRef = true; } bool TieredCompilationManager::TryScheduleBackgroundWorkerWithoutGCTrigger_Locked() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(IsLockOwnedByCurrentThread()); if (s_isBackgroundWorkerProcessingWork) { _ASSERTE(s_isBackgroundWorkerRunning); return true; } if (s_isBackgroundWorkerRunning) { s_isBackgroundWorkerProcessingWork = true; s_backgroundWorkAvailableEvent.Set(); return true; } s_isBackgroundWorkerRunning = true; s_isBackgroundWorkerProcessingWork = true; return false; // it's the caller's responsibility to call CreateBackgroundWorker() after leaving the GC_NOTRIGGER region } void TieredCompilationManager::CreateBackgroundWorker() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; _ASSERTE(!IsLockOwnedByCurrentThread()); _ASSERTE(s_isBackgroundWorkerRunning); _ASSERTE(s_isBackgroundWorkerProcessingWork); _ASSERTE(s_backgroundWorkerThread == nullptr); EX_TRY { if (!s_backgroundWorkAvailableEvent.IsValid()) { // An auto-reset event is used since it's a bit easier to manage and felt more natural in this case. It is also // possible to use a manual-reset event instead, though there doesn't appear to be anything to gain from doing so. s_backgroundWorkAvailableEvent.CreateAutoEvent(false); } Thread *newThread = SetupUnstartedThread(); _ASSERTE(newThread != nullptr); INDEBUG(s_backgroundWorkerThread = newThread); #ifdef FEATURE_COMINTEROP newThread->SetApartment(Thread::AS_InMTA); #endif newThread->SetBackground(true); if (!newThread->CreateNewThread(0, BackgroundWorkerBootstrapper0, newThread, W(".NET Tiered Compilation Worker"))) { newThread->DecExternalCount(false); ThrowOutOfMemory(); } newThread->StartThread(); } EX_CATCH { { LockHolder tieredCompilationLockHolder; s_isBackgroundWorkerProcessingWork = false; s_isBackgroundWorkerRunning = false; INDEBUG(s_backgroundWorkerThread = nullptr); } EX_RETHROW; } EX_END_CATCH(RethrowTerminalExceptions); } DWORD WINAPI TieredCompilationManager::BackgroundWorkerBootstrapper0(LPVOID args) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; _ASSERTE(args != nullptr); Thread *thread = (Thread *)args; _ASSERTE(s_backgroundWorkerThread == thread); if (!thread->HasStarted()) { LockHolder tieredCompilationLockHolder; s_isBackgroundWorkerProcessingWork = false; s_isBackgroundWorkerRunning = false; INDEBUG(s_backgroundWorkerThread = nullptr); return 0; } _ASSERTE(GetThread() == thread); ManagedThreadBase::KickOff(BackgroundWorkerBootstrapper1, nullptr); GCX_PREEMP_NO_DTOR(); DestroyThread(thread); return 0; } void TieredCompilationManager::BackgroundWorkerBootstrapper1(LPVOID) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; GCX_PREEMP(); GetAppDomain()->GetTieredCompilationManager()->BackgroundWorkerStart(); } void TieredCompilationManager::BackgroundWorkerStart() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; _ASSERTE(s_backgroundWorkAvailableEvent.IsValid()); DWORD timeoutMs = g_pConfig->TieredCompilation_BackgroundWorkerTimeoutMs(); DWORD delayMs = g_pConfig->TieredCompilation_CallCountingDelayMs(); int processorCount = GetCurrentProcessCpuCount(); _ASSERTE(processorCount > 0); LARGE_INTEGER li; QueryPerformanceFrequency(&li); UINT64 ticksPerS = li.QuadPart; UINT64 maxWorkDurationTicks = ticksPerS * 50 / 1000; // 50 ms UINT64 minWorkDurationTicks = min(ticksPerS * processorCount / 1000, maxWorkDurationTicks); // <proc count> ms (capped) UINT64 workDurationTicks = minWorkDurationTicks; while (true) { 
_ASSERTE(s_isBackgroundWorkerRunning); _ASSERTE(s_isBackgroundWorkerProcessingWork); if (IsTieringDelayActive()) { do { ClrSleepEx(delayMs, false); } while (!TryDeactivateTieringDelay()); } // Don't want to perform background work as soon as it is scheduled if there is possibly more important work that could // be done. Some operating systems may also give a thread woken by a signal higher priority temporarily, which on a // CPU-limited environment may lead to rejitting a method as soon as it's promoted, effectively in the foreground. ClrSleepEx(0, false); if (IsTieringDelayActive()) { continue; } if ((m_isPendingCallCountingCompletion || m_countOfMethodsToOptimize != 0) && !DoBackgroundWork(&workDurationTicks, minWorkDurationTicks, maxWorkDurationTicks)) { // Background work was interrupted due to the tiering delay being activated _ASSERTE(IsTieringDelayActive()); continue; } { LockHolder tieredCompilationLockHolder; if (IsTieringDelayActive() || m_isPendingCallCountingCompletion || m_countOfMethodsToOptimize != 0) { continue; } s_isBackgroundWorkerProcessingWork = false; } // Wait for the worker to be scheduled again DWORD waitResult = s_backgroundWorkAvailableEvent.Wait(timeoutMs, false); if (waitResult == WAIT_OBJECT_0) { continue; } _ASSERTE(waitResult == WAIT_TIMEOUT); // The wait timed out, see if the worker can exit LockHolder tieredCompilationLockHolder; if (s_isBackgroundWorkerProcessingWork) { // The background worker got scheduled again just as the wait timed out. The event would have been signaled just // after the wait had timed out, so reset it and continue processing work. s_backgroundWorkAvailableEvent.Reset(); continue; } s_isBackgroundWorkerRunning = false; INDEBUG(s_backgroundWorkerThread = nullptr); return; } } bool TieredCompilationManager::IsTieringDelayActive() { LIMITED_METHOD_CONTRACT; return m_methodsPendingCountingForTier1 != nullptr; } bool TieredCompilationManager::TryDeactivateTieringDelay() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; _ASSERTE(GetThread() == s_backgroundWorkerThread); SArray<MethodDesc *> *methodsPendingCounting = nullptr; UINT32 countOfNewMethodsCalledDuringDelay = 0; { // It's possible for the timer to tick before it is recorded that the delay is in effect. This lock guarantees that // the delay is in effect. LockHolder tieredCompilationLockHolder; _ASSERTE(IsTieringDelayActive()); if (m_tier1CallCountingCandidateMethodRecentlyRecorded) { m_tier1CallCountingCandidateMethodRecentlyRecorded = false; return false; } // Exchange information into locals inside the lock methodsPendingCounting = m_methodsPendingCountingForTier1; _ASSERTE(methodsPendingCounting != nullptr); m_methodsPendingCountingForTier1 = nullptr; countOfNewMethodsCalledDuringDelay = m_countOfNewMethodsCalledDuringDelay; m_countOfNewMethodsCalledDuringDelay = 0; _ASSERTE(!IsTieringDelayActive()); } if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled()) { ETW::CompilationLog::TieredCompilation::Runtime::SendResume(countOfNewMethodsCalledDuringDelay); } // Install call counters { MethodDesc** methods = methodsPendingCounting->GetElements(); COUNT_T methodCount = methodsPendingCounting->GetCount(); CodeVersionManager *codeVersionManager = GetAppDomain()->GetCodeVersionManager(); MethodDescBackpatchInfoTracker::ConditionalLockHolderForGCCoop slotBackpatchLockHolder; // Backpatching entry point slots requires cooperative GC mode, see // MethodDescBackpatchInfoTracker::Backpatch_Locked(). 
The code version manager's table lock is an unsafe lock that // may be taken in any GC mode. The lock is taken in cooperative GC mode on some other paths, so the same ordering // must be used here to prevent deadlock. GCX_COOP(); CodeVersionManager::LockHolder codeVersioningLockHolder; for (COUNT_T i = 0; i < methodCount; ++i) { MethodDesc *methodDesc = methods[i]; _ASSERTE(codeVersionManager == methodDesc->GetCodeVersionManager()); NativeCodeVersion activeCodeVersion = codeVersionManager->GetActiveILCodeVersion(methodDesc).GetActiveNativeCodeVersion(methodDesc); if (activeCodeVersion.IsNull()) { continue; } EX_TRY { bool wasSet = CallCountingManager::SetCodeEntryPoint(activeCodeVersion, activeCodeVersion.GetNativeCode(), false, nullptr); _ASSERTE(wasSet); } EX_CATCH { STRESS_LOG1(LF_TIEREDCOMPILATION, LL_WARNING, "TieredCompilationManager::DeactivateTieringDelay: " "Exception in CallCountingManager::SetCodeEntryPoint, hr=0x%x\n", GET_EXCEPTION()->GetHR()); } EX_END_CATCH(RethrowTerminalExceptions); } } delete methodsPendingCounting; return true; } void TieredCompilationManager::AsyncCompleteCallCounting() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; { LockHolder tieredCompilationLockHolder; if (m_recentlyRequestedCallCountingCompletion) { _ASSERTE(m_isPendingCallCountingCompletion); } else { m_isPendingCallCountingCompletion = true; // A potentially large number of methods may reach the call count threshold at about the same time or in bursts. // This field is used to coalesce a burst of pending completions, see the background work. m_recentlyRequestedCallCountingCompletion = true; } // The thread is in a GC_NOTRIGGER scope here. If the background worker is already running, we can schedule it inside // the same lock without triggering a GC. if (TryScheduleBackgroundWorkerWithoutGCTrigger_Locked()) { return; } } CreateBackgroundWorker(); // requires GC_TRIGGERS } //This method will process one or more methods from optimization queue // on a background thread. Each such method will be jitted with code // optimizations enabled and then installed as the active implementation // of the method entrypoint. 
bool TieredCompilationManager::DoBackgroundWork( UINT64 *workDurationTicksRef, UINT64 minWorkDurationTicks, UINT64 maxWorkDurationTicks) { WRAPPER_NO_CONTRACT; _ASSERTE(GetThread() == s_backgroundWorkerThread); _ASSERTE(m_isPendingCallCountingCompletion || m_countOfMethodsToOptimize != 0); _ASSERTE(workDurationTicksRef != nullptr); _ASSERTE(minWorkDurationTicks <= maxWorkDurationTicks); UINT64 workDurationTicks = *workDurationTicksRef; _ASSERTE(workDurationTicks >= minWorkDurationTicks); _ASSERTE(workDurationTicks <= maxWorkDurationTicks); if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled()) { UINT32 countOfMethodsToOptimize = m_countOfMethodsToOptimize; if (m_isPendingCallCountingCompletion) { countOfMethodsToOptimize += CallCountingManager::GetCountOfCodeVersionsPendingCompletion(); } ETW::CompilationLog::TieredCompilation::Runtime::SendBackgroundJitStart(countOfMethodsToOptimize); } bool sendStopEvent = true; bool allMethodsJitted = false; UINT32 jittedMethodCount = 0; LARGE_INTEGER li; QueryPerformanceCounter(&li); UINT64 startTicks = li.QuadPart; UINT64 previousTicks = startTicks; do { bool completeCallCounting = false; NativeCodeVersion nativeCodeVersionToOptimize; { LockHolder tieredCompilationLockHolder; if (IsTieringDelayActive()) { break; } bool wasPendingCallCountingCompletion = m_isPendingCallCountingCompletion; if (wasPendingCallCountingCompletion) { if (m_recentlyRequestedCallCountingCompletion) { // A potentially large number of methods may reach the call count threshold at about the same time or in // bursts. To coalesce a burst of pending completions a bit, if another method has reached the call count // threshold since the last time it was checked here, don't complete call counting yet. Coalescing // call counting completions a bit helps to avoid blocking foreground threads due to lock contention as // methods are continuing to reach the call count threshold. m_recentlyRequestedCallCountingCompletion = false; } else { m_isPendingCallCountingCompletion = false; completeCallCounting = true; } } if (!completeCallCounting) { nativeCodeVersionToOptimize = GetNextMethodToOptimize(); if (nativeCodeVersionToOptimize.IsNull()) { // Ran out of methods to JIT if (wasPendingCallCountingCompletion) { // If call counting completions are pending and delayed above for coalescing, complete call counting // now, as that will add more methods to be rejitted m_isPendingCallCountingCompletion = false; _ASSERTE(!m_recentlyRequestedCallCountingCompletion); completeCallCounting = true; } else { allMethodsJitted = true; break; } } } } _ASSERTE(completeCallCounting == !!nativeCodeVersionToOptimize.IsNull()); if (completeCallCounting) { EX_TRY { CallCountingManager::CompleteCallCounting(); } EX_CATCH { STRESS_LOG1(LF_TIEREDCOMPILATION, LL_WARNING, "TieredCompilationManager::DoBackgroundWork: " "Exception in CallCountingManager::CompleteCallCounting, hr=0x%x\n", GET_EXCEPTION()->GetHR()); } EX_END_CATCH(RethrowTerminalExceptions); continue; } OptimizeMethod(nativeCodeVersionToOptimize); ++jittedMethodCount; // Yield the thread periodically to give preference to possibly more important work QueryPerformanceCounter(&li); UINT64 currentTicks = li.QuadPart; if (currentTicks - startTicks < workDurationTicks) { previousTicks = currentTicks; continue; } if (currentTicks - previousTicks >= maxWorkDurationTicks) { // It's unlikely that one iteration above would have taken that long, more likely this thread got scheduled out for // a while, in which case there is no need to yield again. 
Discount the time taken for the previous iteration and // continue processing work. startTicks += currentTicks - previousTicks; previousTicks = currentTicks; continue; } if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled()) { UINT32 countOfMethodsToOptimize = m_countOfMethodsToOptimize; if (m_isPendingCallCountingCompletion) { countOfMethodsToOptimize += CallCountingManager::GetCountOfCodeVersionsPendingCompletion(); } ETW::CompilationLog::TieredCompilation::Runtime::SendBackgroundJitStop(countOfMethodsToOptimize, jittedMethodCount); } UINT64 beforeSleepTicks = currentTicks; ClrSleepEx(0, false); QueryPerformanceCounter(&li); currentTicks = li.QuadPart; // Depending on how oversubscribed thread usage is on the system, the sleep may have caused this thread to not be // scheduled for a long time. Yielding the thread too frequently may significantly slow down the background work, which // may significantly delay how long it takes to reach steady-state performance. On the other hand, yielding the thread // too infrequently may cause the background work to monopolize the available CPU resources and prevent more important // foreground work from occurring. So the sleep duration is measured and for the next batch of background work, at least // a portion of that measured duration is used (within the min and max to keep things sensible). Since the background // work duration is capped to a maximum and since a long sleep delay is likely to repeat, to avoid going back to // too-frequent yielding too quickly, the background work duration is decayed back to the minimum if the sleep duration // becomes consistently short. UINT64 newWorkDurationTicks = (currentTicks - beforeSleepTicks) / 4; UINT64 decayedWorkDurationTicks = (workDurationTicks + workDurationTicks / 2) / 2; workDurationTicks = newWorkDurationTicks < decayedWorkDurationTicks ? decayedWorkDurationTicks : newWorkDurationTicks; if (workDurationTicks < minWorkDurationTicks) { workDurationTicks = minWorkDurationTicks; } else if (workDurationTicks > maxWorkDurationTicks) { workDurationTicks = maxWorkDurationTicks; } if (IsTieringDelayActive()) { sendStopEvent = false; break; } if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled()) { UINT32 countOfMethodsToOptimize = m_countOfMethodsToOptimize; if (m_isPendingCallCountingCompletion) { countOfMethodsToOptimize += CallCountingManager::GetCountOfCodeVersionsPendingCompletion(); } ETW::CompilationLog::TieredCompilation::Runtime::SendBackgroundJitStart(countOfMethodsToOptimize); } jittedMethodCount = 0; startTicks = previousTicks = currentTicks; } while (!IsTieringDelayActive()); if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled() && sendStopEvent) { UINT32 countOfMethodsToOptimize = m_countOfMethodsToOptimize; if (m_isPendingCallCountingCompletion) { countOfMethodsToOptimize += CallCountingManager::GetCountOfCodeVersionsPendingCompletion(); } ETW::CompilationLog::TieredCompilation::Runtime::SendBackgroundJitStop(countOfMethodsToOptimize, jittedMethodCount); } if (allMethodsJitted) { EX_TRY { CallCountingManager::StopAndDeleteAllCallCountingStubs(); } EX_CATCH { STRESS_LOG1(LF_TIEREDCOMPILATION, LL_WARNING, "TieredCompilationManager::DoBackgroundWork: " "Exception in CallCountingManager::StopAndDeleteAllCallCountingStubs, hr=0x%x\n", GET_EXCEPTION()->GetHR()); } EX_END_CATCH(RethrowTerminalExceptions); } *workDurationTicksRef = workDurationTicks; return allMethodsJitted; } // Jit compiles and installs new optimized code for a method. 
// Called on a background thread. void TieredCompilationManager::OptimizeMethod(NativeCodeVersion nativeCodeVersion) { STANDARD_VM_CONTRACT; _ASSERTE(nativeCodeVersion.GetMethodDesc()->IsEligibleForTieredCompilation()); if (CompileCodeVersion(nativeCodeVersion)) { ActivateCodeVersion(nativeCodeVersion); } } // Compiles new optimized code for a method. // Called on a background thread. BOOL TieredCompilationManager::CompileCodeVersion(NativeCodeVersion nativeCodeVersion) { STANDARD_VM_CONTRACT; PCODE pCode = NULL; MethodDesc* pMethod = nativeCodeVersion.GetMethodDesc(); EX_TRY { PrepareCodeConfigBuffer configBuffer(nativeCodeVersion); PrepareCodeConfig *config = configBuffer.GetConfig(); // This is a recompiling request which means the caller was // in COOP mode since the code already ran. _ASSERTE(!pMethod->HasUnmanagedCallersOnlyAttribute()); config->SetCallerGCMode(CallerGCMode::Coop); pCode = pMethod->PrepareCode(config); LOG((LF_TIEREDCOMPILATION, LL_INFO10000, "TieredCompilationManager::CompileCodeVersion Method=0x%pM (%s::%s), code version id=0x%x, code ptr=0x%p\n", pMethod, pMethod->m_pszDebugClassName, pMethod->m_pszDebugMethodName, nativeCodeVersion.GetVersionId(), pCode)); if (config->JitSwitchedToMinOpt()) { // The JIT decided to switch to min-opts, likely due to the method being very large or complex. The rejitted code // may be slower if the method had been prejitted. Ignore the rejitted code and continue using the tier 0 entry // point. // TODO: In the future, we should get some feedback from images containing pregenerated code and from tier 0 JIT // indicating that the method would not benefit from a rejit and avoid the rejit altogether. pCode = NULL; } } EX_CATCH { // Failing to jit should be rare but acceptable. We will leave whatever code already exists in place. STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "TieredCompilationManager::CompileCodeVersion: Method %pM failed to jit, hr=0x%x\n", pMethod, GET_EXCEPTION()->GetHR()); } EX_END_CATCH(RethrowTerminalExceptions) return pCode != NULL; } // Updates the MethodDesc and precode so that future invocations of a method will // execute the native code pointed to by pCode. // Called on a background thread. void TieredCompilationManager::ActivateCodeVersion(NativeCodeVersion nativeCodeVersion) { STANDARD_VM_CONTRACT; MethodDesc* pMethod = nativeCodeVersion.GetMethodDesc(); // If the ilParent version is active this will activate the native code version now. // Otherwise if the ilParent version becomes active again in the future the native // code version will activate then. ILCodeVersion ilParent; HRESULT hr = S_OK; { bool mayHaveEntryPointSlotsToBackpatch = pMethod->MayHaveEntryPointSlotsToBackpatch(); MethodDescBackpatchInfoTracker::ConditionalLockHolderForGCCoop slotBackpatchLockHolder( mayHaveEntryPointSlotsToBackpatch); // Backpatching entry point slots requires cooperative GC mode, see // MethodDescBackpatchInfoTracker::Backpatch_Locked(). The code version manager's table lock is an unsafe lock that // may be taken in any GC mode. The lock is taken in cooperative GC mode on some other paths, so the same ordering // must be used here to prevent deadlock. 
GCX_MAYBE_COOP(mayHaveEntryPointSlotsToBackpatch); CodeVersionManager::LockHolder codeVersioningLockHolder; // As long as we are exclusively using any non-JumpStamp publishing for tiered compilation // methods this first attempt should succeed ilParent = nativeCodeVersion.GetILCodeVersion(); hr = ilParent.SetActiveNativeCodeVersion(nativeCodeVersion); LOG((LF_TIEREDCOMPILATION, LL_INFO10000, "TieredCompilationManager::ActivateCodeVersion Method=0x%pM (%s::%s), code version id=0x%x. SetActiveNativeCodeVersion ret=0x%x\n", pMethod, pMethod->m_pszDebugClassName, pMethod->m_pszDebugMethodName, nativeCodeVersion.GetVersionId(), hr)); } if (FAILED(hr)) { STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "TieredCompilationManager::ActivateCodeVersion: " "Method %pM failed to publish native code for native code version %d\n", pMethod, nativeCodeVersion.GetVersionId()); } } // Dequeues the next method in the optmization queue. // This runs on the background thread. NativeCodeVersion TieredCompilationManager::GetNextMethodToOptimize() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(IsLockOwnedByCurrentThread()); SListElem<NativeCodeVersion>* pElem = m_methodsToOptimize.RemoveHead(); if (pElem != NULL) { NativeCodeVersion nativeCodeVersion = pElem->GetValue(); delete pElem; _ASSERTE(m_countOfMethodsToOptimize != 0); --m_countOfMethodsToOptimize; return nativeCodeVersion; } return NativeCodeVersion(); } //static CORJIT_FLAGS TieredCompilationManager::GetJitFlags(PrepareCodeConfig *config) { WRAPPER_NO_CONTRACT; _ASSERTE(config != nullptr); _ASSERTE( !config->WasTieringDisabledBeforeJitting() || config->GetCodeVersion().GetOptimizationTier() != NativeCodeVersion::OptimizationTier0); CORJIT_FLAGS flags; // Determine the optimization tier for the default code version (slightly faster common path during startup compared to // below), and disable call counting and set the optimization tier if it's not going to be tier 0 (this is used in other // places for the default code version where necessary to avoid the extra expense of GetOptimizationTier()). 
NativeCodeVersion nativeCodeVersion = config->GetCodeVersion(); if (nativeCodeVersion.IsDefaultVersion() && !config->WasTieringDisabledBeforeJitting()) { MethodDesc *methodDesc = nativeCodeVersion.GetMethodDesc(); if (!methodDesc->IsEligibleForTieredCompilation()) { _ASSERTE(nativeCodeVersion.GetOptimizationTier() == NativeCodeVersion::OptimizationTierOptimized); #ifdef FEATURE_INTERPRETER flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE); #endif return flags; } NativeCodeVersion::OptimizationTier newOptimizationTier; if (!methodDesc->RequestedAggressiveOptimization()) { if (g_pConfig->TieredCompilation_QuickJit()) { _ASSERTE(nativeCodeVersion.GetOptimizationTier() == NativeCodeVersion::OptimizationTier0); flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER0); return flags; } newOptimizationTier = NativeCodeVersion::OptimizationTierOptimized; } else { newOptimizationTier = NativeCodeVersion::OptimizationTier1; flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER1); } methodDesc->GetLoaderAllocator()->GetCallCountingManager()->DisableCallCounting(nativeCodeVersion); nativeCodeVersion.SetOptimizationTier(newOptimizationTier); #ifdef FEATURE_INTERPRETER flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE); #endif return flags; } switch (nativeCodeVersion.GetOptimizationTier()) { case NativeCodeVersion::OptimizationTier0: if (g_pConfig->TieredCompilation_QuickJit()) { flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER0); break; } nativeCodeVersion.SetOptimizationTier(NativeCodeVersion::OptimizationTierOptimized); goto Optimized; #ifdef FEATURE_ON_STACK_REPLACEMENT case NativeCodeVersion::OptimizationTier1OSR: flags.Set(CORJIT_FLAGS::CORJIT_FLAG_OSR); FALLTHROUGH; #endif case NativeCodeVersion::OptimizationTier1: flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER1); FALLTHROUGH; case NativeCodeVersion::OptimizationTierOptimized: Optimized: #ifdef FEATURE_INTERPRETER flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE); #endif break; default: UNREACHABLE(); } return flags; } #ifdef _DEBUG bool TieredCompilationManager::IsLockOwnedByCurrentThread() { WRAPPER_NO_CONTRACT; return !!s_lock.OwnedByCurrentThread(); } #endif // _DEBUG #endif // FEATURE_TIERED_COMPILATION && !DACCESS_COMPILE
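The DoBackgroundWork loop above tunes how long it works between yields by measuring how long the ClrSleepEx(0, false) yield actually took. A minimal standalone sketch of that decay-and-clamp rule follows; the function and parameter names are my own (nothing here is runtime API), it only mirrors the arithmetic described in the comments.

#include <algorithm>
#include <cstdint>

// Sketch of the budget update in DoBackgroundWork: a quarter of the measured
// sleep duration competes against a 0.75x decay of the previous budget; the
// larger value wins and is then clamped to [minTicks, maxTicks].
uint64_t NextWorkBudgetTicks(uint64_t previousBudgetTicks,
                             uint64_t measuredSleepTicks,
                             uint64_t minTicks,
                             uint64_t maxTicks)
{
    uint64_t fromSleep = measuredSleepTicks / 4;
    uint64_t decayed   = (previousBudgetTicks + previousBudgetTicks / 2) / 2;
    uint64_t next      = fromSleep < decayed ? decayed : fromSleep;
    return std::min(std::max(next, minTicks), maxTicks);
}

The effect is that a long, repeated sleep raises the work budget quickly, while consistently short sleeps let it decay back toward the minimum.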
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // =========================================================================== // File: TieredCompilation.CPP // // =========================================================================== #include "common.h" #include "excep.h" #include "log.h" #include "win32threadpool.h" #include "threadsuspend.h" #include "tieredcompilation.h" // TieredCompilationManager determines which methods should be recompiled and // how they should be recompiled to best optimize the running code. It then // handles logistics of getting new code created and installed. // // // # Important entrypoints in this code: // // // a) .ctor - called once during AppDomain initialization // b) HandleCallCountingForFirstCall(...) - called when a method's code version is being // invoked for the first time. // // # Overall workflow // // Methods initially call into HandleCallCountingForFirstCall() and once the call count exceeds // a fixed limit we queue work on to our internal list of methods needing to // be recompiled (m_methodsToOptimize). If there is currently no thread // servicing our queue asynchronously then we use the runtime threadpool // QueueUserWorkItem to recruit one. During the callback for each threadpool work // item we handle as many methods as possible in a fixed period of time, then // queue another threadpool work item if m_methodsToOptimize hasn't been drained. // // The background thread enters at StaticBackgroundWorkCallback(), enters the // appdomain, and then begins calling OptimizeMethod on each method in the // queue. For each method we jit it, then update the precode so that future // entrypoint callers will run the new code. // // # Error handling // // The overall principle is don't swallow terminal failures that may have corrupted the // process (AV for example), but otherwise for any transient issue or functional limitation // that prevents us from optimizing log it for diagnostics and then back out gracefully, // continuing to run the less optimal code. The feature should be constructed so that // errors are limited to OS resource exhaustion or poorly behaved managed code // (for example within an AssemblyResolve event or static constructor triggered by the JIT). 
#if defined(FEATURE_TIERED_COMPILATION) && !defined(DACCESS_COMPILE) CrstStatic TieredCompilationManager::s_lock; #ifdef _DEBUG Thread *TieredCompilationManager::s_backgroundWorkerThread = nullptr; #endif CLREvent TieredCompilationManager::s_backgroundWorkAvailableEvent; bool TieredCompilationManager::s_isBackgroundWorkerRunning = false; bool TieredCompilationManager::s_isBackgroundWorkerProcessingWork = false; // Called at AppDomain construction TieredCompilationManager::TieredCompilationManager() : m_countOfMethodsToOptimize(0), m_countOfNewMethodsCalledDuringDelay(0), m_methodsPendingCountingForTier1(nullptr), m_tier1CallCountingCandidateMethodRecentlyRecorded(false), m_isPendingCallCountingCompletion(false), m_recentlyRequestedCallCountingCompletion(false) { WRAPPER_NO_CONTRACT; // On Unix, we can reach here before EEConfig is initialized, so defer config-based initialization to Init() } // Called at AppDomain Init void TieredCompilationManager::Init() { CONTRACTL { GC_NOTRIGGER; CAN_TAKE_LOCK; MODE_PREEMPTIVE; } CONTRACTL_END; } #endif // FEATURE_TIERED_COMPILATION && !DACCESS_COMPILE NativeCodeVersion::OptimizationTier TieredCompilationManager::GetInitialOptimizationTier(PTR_MethodDesc pMethodDesc) { WRAPPER_NO_CONTRACT; _ASSERTE(pMethodDesc != NULL); #ifdef FEATURE_TIERED_COMPILATION if (!pMethodDesc->IsEligibleForTieredCompilation()) { // The optimization tier is not used return NativeCodeVersion::OptimizationTierOptimized; } if (pMethodDesc->RequestedAggressiveOptimization()) { // Methods flagged with MethodImplOptions.AggressiveOptimization start with and stay at tier 1 return NativeCodeVersion::OptimizationTier1; } if (!pMethodDesc->GetLoaderAllocator()->GetCallCountingManager()->IsCallCountingEnabled(NativeCodeVersion(pMethodDesc))) { // Tier 0 call counting may have been disabled for several reasons, the intention is to start with and stay at an // optimized tier return NativeCodeVersion::OptimizationTierOptimized; } return NativeCodeVersion::OptimizationTier0; #else return NativeCodeVersion::OptimizationTierOptimized; #endif } #if defined(FEATURE_TIERED_COMPILATION) && !defined(DACCESS_COMPILE) void TieredCompilationManager::HandleCallCountingForFirstCall(MethodDesc* pMethodDesc) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; _ASSERTE(pMethodDesc != nullptr); _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation()); _ASSERTE(g_pConfig->TieredCompilation_CallCountingDelayMs() != 0); // An exception here (OOM) would mean that the method's calls would not be counted and it would not be promoted. A // consideration is that an attempt can be made to reset the code entry point on exception (which can also OOM). Doesn't // seem worth it, the exception is propagated and there are other cases where a method may not be promoted due to OOM. 
bool createBackgroundWorker; { LockHolder tieredCompilationLockHolder; SArray<MethodDesc *> *methodsPendingCounting = m_methodsPendingCountingForTier1; _ASSERTE((methodsPendingCounting != nullptr) == IsTieringDelayActive()); if (methodsPendingCounting != nullptr) { methodsPendingCounting->Append(pMethodDesc); ++m_countOfNewMethodsCalledDuringDelay; if (!m_tier1CallCountingCandidateMethodRecentlyRecorded) { // Delay call counting for currently recoded methods further m_tier1CallCountingCandidateMethodRecentlyRecorded = true; } return; } NewHolder<SArray<MethodDesc *>> methodsPendingCountingHolder = new SArray<MethodDesc *>(); methodsPendingCountingHolder->Preallocate(64); methodsPendingCountingHolder->Append(pMethodDesc); ++m_countOfNewMethodsCalledDuringDelay; m_methodsPendingCountingForTier1 = methodsPendingCountingHolder.Extract(); _ASSERTE(!m_tier1CallCountingCandidateMethodRecentlyRecorded); _ASSERTE(IsTieringDelayActive()); // The thread is in a GC_NOTRIGGER scope here. If the background worker is already running, we can schedule it inside // the same lock without triggering a GC. createBackgroundWorker = !TryScheduleBackgroundWorkerWithoutGCTrigger_Locked(); } if (createBackgroundWorker) { // Elsewhere, the tiered compilation lock is taken inside the code versioning lock. The code versioning lock is an // unsafe any-GC-mode lock, so the tiering lock is also that type of lock. Inside that type of lock, there is an // implicit GC_NOTRIGGER contract. So, a thread cannot be created inside the tiering lock since it may GC_TRIGGERS. At // this point, this is the only thread that may attempt creating the background worker thread. EX_TRY { CreateBackgroundWorker(); } EX_CATCH { // Since the tiering lock was released and reacquired, other methods may have been recorded in-between. Just // deactivate the tiering delay. Any methods that have been recorded would not have their calls be counted and // would not be promoted (due to the small window, there shouldn't be many of those). See consideration above in a // similar exception case. { LockHolder tieredCompilationLockHolder; _ASSERTE(IsTieringDelayActive()); m_tier1CallCountingCandidateMethodRecentlyRecorded = false; _ASSERTE(m_methodsPendingCountingForTier1 != nullptr); delete m_methodsPendingCountingForTier1; m_methodsPendingCountingForTier1 = nullptr; _ASSERTE(!IsTieringDelayActive()); } EX_RETHROW; } EX_END_CATCH(RethrowTerminalExceptions); } if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled()) { ETW::CompilationLog::TieredCompilation::Runtime::SendPause(); } } bool TieredCompilationManager::TrySetCodeEntryPointAndRecordMethodForCallCounting(MethodDesc* pMethodDesc, PCODE codeEntryPoint) { WRAPPER_NO_CONTRACT; _ASSERTE(pMethodDesc != nullptr); _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation()); _ASSERTE(codeEntryPoint != NULL); if (!IsTieringDelayActive()) { return false; } LockHolder tieredCompilationLockHolder; if (!IsTieringDelayActive()) { return false; } // Set the code entry point before recording the method for call counting to avoid a race. Otherwise, the tiering delay may // expire and enable call counting for the method before the entry point is set here, in which case calls to the method // would not be counted anymore. 
pMethodDesc->SetCodeEntryPoint(codeEntryPoint); _ASSERTE(m_methodsPendingCountingForTier1 != nullptr); m_methodsPendingCountingForTier1->Append(pMethodDesc); return true; } void TieredCompilationManager::AsyncPromoteToTier1( NativeCodeVersion tier0NativeCodeVersion, bool *createTieringBackgroundWorkerRef) { CONTRACTL { THROWS; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(CodeVersionManager::IsLockOwnedByCurrentThread()); _ASSERTE(!tier0NativeCodeVersion.IsNull()); _ASSERTE(tier0NativeCodeVersion.GetOptimizationTier() == NativeCodeVersion::OptimizationTier0); _ASSERTE(createTieringBackgroundWorkerRef != nullptr); NativeCodeVersion t1NativeCodeVersion; HRESULT hr; // Add an inactive native code entry in the versioning table to track the tier1 // compilation we are going to create. This entry binds the compilation to a // particular version of the IL code regardless of any changes that may // occur between now and when jitting completes. If the IL does change in that // interval the new code entry won't be activated. MethodDesc *pMethodDesc = tier0NativeCodeVersion.GetMethodDesc(); ILCodeVersion ilCodeVersion = tier0NativeCodeVersion.GetILCodeVersion(); _ASSERTE(!ilCodeVersion.HasAnyOptimizedNativeCodeVersion(tier0NativeCodeVersion)); hr = ilCodeVersion.AddNativeCodeVersion(pMethodDesc, NativeCodeVersion::OptimizationTier1, &t1NativeCodeVersion); if (FAILED(hr)) { ThrowHR(hr); } // Insert the method into the optimization queue and trigger a thread to service // the queue if needed. SListElem<NativeCodeVersion>* pMethodListItem = new SListElem<NativeCodeVersion>(t1NativeCodeVersion); { LockHolder tieredCompilationLockHolder; m_methodsToOptimize.InsertTail(pMethodListItem); ++m_countOfMethodsToOptimize; LOG((LF_TIEREDCOMPILATION, LL_INFO10000, "TieredCompilationManager::AsyncPromoteToTier1 Method=0x%pM (%s::%s), code version id=0x%x queued\n", pMethodDesc, pMethodDesc->m_pszDebugClassName, pMethodDesc->m_pszDebugMethodName, t1NativeCodeVersion.GetVersionId())); // The thread is in a GC_NOTRIGGER scope here. If the background worker is already running, we can schedule it inside // the same lock without triggering a GC. if (TryScheduleBackgroundWorkerWithoutGCTrigger_Locked()) { return; } } // This function is called from a GC_NOTRIGGER scope and creating the background worker (creating a thread) may GC_TRIGGERS. // The caller needs to create the background worker after leaving the GC_NOTRIGGER scope. The contract is that the caller // must make an attempt to create the background worker in any normal path. In the event of an atypical exception (eg. OOM), // the background worker may not be created and would have to be tried again the next time some background work is queued. 
*createTieringBackgroundWorkerRef = true; } bool TieredCompilationManager::TryScheduleBackgroundWorkerWithoutGCTrigger_Locked() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(IsLockOwnedByCurrentThread()); if (s_isBackgroundWorkerProcessingWork) { _ASSERTE(s_isBackgroundWorkerRunning); return true; } if (s_isBackgroundWorkerRunning) { s_isBackgroundWorkerProcessingWork = true; s_backgroundWorkAvailableEvent.Set(); return true; } s_isBackgroundWorkerRunning = true; s_isBackgroundWorkerProcessingWork = true; return false; // it's the caller's responsibility to call CreateBackgroundWorker() after leaving the GC_NOTRIGGER region } void TieredCompilationManager::CreateBackgroundWorker() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; _ASSERTE(!IsLockOwnedByCurrentThread()); _ASSERTE(s_isBackgroundWorkerRunning); _ASSERTE(s_isBackgroundWorkerProcessingWork); _ASSERTE(s_backgroundWorkerThread == nullptr); EX_TRY { if (!s_backgroundWorkAvailableEvent.IsValid()) { // An auto-reset event is used since it's a bit easier to manage and felt more natural in this case. It is also // possible to use a manual-reset event instead, though there doesn't appear to be anything to gain from doing so. s_backgroundWorkAvailableEvent.CreateAutoEvent(false); } Thread *newThread = SetupUnstartedThread(); _ASSERTE(newThread != nullptr); INDEBUG(s_backgroundWorkerThread = newThread); #ifdef FEATURE_COMINTEROP newThread->SetApartment(Thread::AS_InMTA); #endif newThread->SetBackground(true); if (!newThread->CreateNewThread(0, BackgroundWorkerBootstrapper0, newThread, W(".NET Tiered Compilation Worker"))) { newThread->DecExternalCount(false); ThrowOutOfMemory(); } newThread->StartThread(); } EX_CATCH { { LockHolder tieredCompilationLockHolder; s_isBackgroundWorkerProcessingWork = false; s_isBackgroundWorkerRunning = false; INDEBUG(s_backgroundWorkerThread = nullptr); } EX_RETHROW; } EX_END_CATCH(RethrowTerminalExceptions); } DWORD WINAPI TieredCompilationManager::BackgroundWorkerBootstrapper0(LPVOID args) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; _ASSERTE(args != nullptr); Thread *thread = (Thread *)args; _ASSERTE(s_backgroundWorkerThread == thread); if (!thread->HasStarted()) { LockHolder tieredCompilationLockHolder; s_isBackgroundWorkerProcessingWork = false; s_isBackgroundWorkerRunning = false; INDEBUG(s_backgroundWorkerThread = nullptr); return 0; } _ASSERTE(GetThread() == thread); ManagedThreadBase::KickOff(BackgroundWorkerBootstrapper1, nullptr); GCX_PREEMP_NO_DTOR(); DestroyThread(thread); return 0; } void TieredCompilationManager::BackgroundWorkerBootstrapper1(LPVOID) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; GCX_PREEMP(); GetAppDomain()->GetTieredCompilationManager()->BackgroundWorkerStart(); } void TieredCompilationManager::BackgroundWorkerStart() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; _ASSERTE(s_backgroundWorkAvailableEvent.IsValid()); DWORD timeoutMs = g_pConfig->TieredCompilation_BackgroundWorkerTimeoutMs(); DWORD delayMs = g_pConfig->TieredCompilation_CallCountingDelayMs(); int processorCount = GetCurrentProcessCpuCount(); _ASSERTE(processorCount > 0); LARGE_INTEGER li; QueryPerformanceFrequency(&li); UINT64 ticksPerS = li.QuadPart; UINT64 maxWorkDurationTicks = ticksPerS * 50 / 1000; // 50 ms UINT64 minWorkDurationTicks = min(ticksPerS * processorCount / 1000, maxWorkDurationTicks); // <proc count> ms (capped) UINT64 workDurationTicks = minWorkDurationTicks; while (true) { 
_ASSERTE(s_isBackgroundWorkerRunning); _ASSERTE(s_isBackgroundWorkerProcessingWork); if (IsTieringDelayActive()) { do { ClrSleepEx(delayMs, false); } while (!TryDeactivateTieringDelay()); } // Don't want to perform background work as soon as it is scheduled if there is possibly more important work that could // be done. Some operating systems may also give a thread woken by a signal higher priority temporarily, which on a // CPU-limited environment may lead to rejitting a method as soon as it's promoted, effectively in the foreground. ClrSleepEx(0, false); if (IsTieringDelayActive()) { continue; } if ((m_isPendingCallCountingCompletion || m_countOfMethodsToOptimize != 0) && !DoBackgroundWork(&workDurationTicks, minWorkDurationTicks, maxWorkDurationTicks)) { // Background work was interrupted due to the tiering delay being activated _ASSERTE(IsTieringDelayActive()); continue; } { LockHolder tieredCompilationLockHolder; if (IsTieringDelayActive() || m_isPendingCallCountingCompletion || m_countOfMethodsToOptimize != 0) { continue; } s_isBackgroundWorkerProcessingWork = false; } // Wait for the worker to be scheduled again DWORD waitResult = s_backgroundWorkAvailableEvent.Wait(timeoutMs, false); if (waitResult == WAIT_OBJECT_0) { continue; } _ASSERTE(waitResult == WAIT_TIMEOUT); // The wait timed out, see if the worker can exit LockHolder tieredCompilationLockHolder; if (s_isBackgroundWorkerProcessingWork) { // The background worker got scheduled again just as the wait timed out. The event would have been signaled just // after the wait had timed out, so reset it and continue processing work. s_backgroundWorkAvailableEvent.Reset(); continue; } s_isBackgroundWorkerRunning = false; INDEBUG(s_backgroundWorkerThread = nullptr); return; } } bool TieredCompilationManager::IsTieringDelayActive() { LIMITED_METHOD_CONTRACT; return m_methodsPendingCountingForTier1 != nullptr; } bool TieredCompilationManager::TryDeactivateTieringDelay() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; _ASSERTE(GetThread() == s_backgroundWorkerThread); SArray<MethodDesc *> *methodsPendingCounting = nullptr; UINT32 countOfNewMethodsCalledDuringDelay = 0; { // It's possible for the timer to tick before it is recorded that the delay is in effect. This lock guarantees that // the delay is in effect. LockHolder tieredCompilationLockHolder; _ASSERTE(IsTieringDelayActive()); if (m_tier1CallCountingCandidateMethodRecentlyRecorded) { m_tier1CallCountingCandidateMethodRecentlyRecorded = false; return false; } // Exchange information into locals inside the lock methodsPendingCounting = m_methodsPendingCountingForTier1; _ASSERTE(methodsPendingCounting != nullptr); m_methodsPendingCountingForTier1 = nullptr; countOfNewMethodsCalledDuringDelay = m_countOfNewMethodsCalledDuringDelay; m_countOfNewMethodsCalledDuringDelay = 0; _ASSERTE(!IsTieringDelayActive()); } if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled()) { ETW::CompilationLog::TieredCompilation::Runtime::SendResume(countOfNewMethodsCalledDuringDelay); } // Install call counters { MethodDesc** methods = methodsPendingCounting->GetElements(); COUNT_T methodCount = methodsPendingCounting->GetCount(); CodeVersionManager *codeVersionManager = GetAppDomain()->GetCodeVersionManager(); MethodDescBackpatchInfoTracker::ConditionalLockHolderForGCCoop slotBackpatchLockHolder; // Backpatching entry point slots requires cooperative GC mode, see // MethodDescBackpatchInfoTracker::Backpatch_Locked(). 
The code version manager's table lock is an unsafe lock that // may be taken in any GC mode. The lock is taken in cooperative GC mode on some other paths, so the same ordering // must be used here to prevent deadlock. GCX_COOP(); CodeVersionManager::LockHolder codeVersioningLockHolder; for (COUNT_T i = 0; i < methodCount; ++i) { MethodDesc *methodDesc = methods[i]; _ASSERTE(codeVersionManager == methodDesc->GetCodeVersionManager()); NativeCodeVersion activeCodeVersion = codeVersionManager->GetActiveILCodeVersion(methodDesc).GetActiveNativeCodeVersion(methodDesc); if (activeCodeVersion.IsNull()) { continue; } EX_TRY { bool wasSet = CallCountingManager::SetCodeEntryPoint(activeCodeVersion, activeCodeVersion.GetNativeCode(), false, nullptr); _ASSERTE(wasSet); } EX_CATCH { STRESS_LOG1(LF_TIEREDCOMPILATION, LL_WARNING, "TieredCompilationManager::DeactivateTieringDelay: " "Exception in CallCountingManager::SetCodeEntryPoint, hr=0x%x\n", GET_EXCEPTION()->GetHR()); } EX_END_CATCH(RethrowTerminalExceptions); } } delete methodsPendingCounting; return true; } void TieredCompilationManager::AsyncCompleteCallCounting() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; { LockHolder tieredCompilationLockHolder; if (m_recentlyRequestedCallCountingCompletion) { _ASSERTE(m_isPendingCallCountingCompletion); } else { m_isPendingCallCountingCompletion = true; // A potentially large number of methods may reach the call count threshold at about the same time or in bursts. // This field is used to coalesce a burst of pending completions, see the background work. m_recentlyRequestedCallCountingCompletion = true; } // The thread is in a GC_NOTRIGGER scope here. If the background worker is already running, we can schedule it inside // the same lock without triggering a GC. if (TryScheduleBackgroundWorkerWithoutGCTrigger_Locked()) { return; } } CreateBackgroundWorker(); // requires GC_TRIGGERS } //This method will process one or more methods from optimization queue // on a background thread. Each such method will be jitted with code // optimizations enabled and then installed as the active implementation // of the method entrypoint. 
bool TieredCompilationManager::DoBackgroundWork( UINT64 *workDurationTicksRef, UINT64 minWorkDurationTicks, UINT64 maxWorkDurationTicks) { WRAPPER_NO_CONTRACT; _ASSERTE(GetThread() == s_backgroundWorkerThread); _ASSERTE(m_isPendingCallCountingCompletion || m_countOfMethodsToOptimize != 0); _ASSERTE(workDurationTicksRef != nullptr); _ASSERTE(minWorkDurationTicks <= maxWorkDurationTicks); UINT64 workDurationTicks = *workDurationTicksRef; _ASSERTE(workDurationTicks >= minWorkDurationTicks); _ASSERTE(workDurationTicks <= maxWorkDurationTicks); if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled()) { UINT32 countOfMethodsToOptimize = m_countOfMethodsToOptimize; if (m_isPendingCallCountingCompletion) { countOfMethodsToOptimize += CallCountingManager::GetCountOfCodeVersionsPendingCompletion(); } ETW::CompilationLog::TieredCompilation::Runtime::SendBackgroundJitStart(countOfMethodsToOptimize); } bool sendStopEvent = true; bool allMethodsJitted = false; UINT32 jittedMethodCount = 0; LARGE_INTEGER li; QueryPerformanceCounter(&li); UINT64 startTicks = li.QuadPart; UINT64 previousTicks = startTicks; do { bool completeCallCounting = false; NativeCodeVersion nativeCodeVersionToOptimize; { LockHolder tieredCompilationLockHolder; if (IsTieringDelayActive()) { break; } bool wasPendingCallCountingCompletion = m_isPendingCallCountingCompletion; if (wasPendingCallCountingCompletion) { if (m_recentlyRequestedCallCountingCompletion) { // A potentially large number of methods may reach the call count threshold at about the same time or in // bursts. To coalesce a burst of pending completions a bit, if another method has reached the call count // threshold since the last time it was checked here, don't complete call counting yet. Coalescing // call counting completions a bit helps to avoid blocking foreground threads due to lock contention as // methods are continuing to reach the call count threshold. m_recentlyRequestedCallCountingCompletion = false; } else { m_isPendingCallCountingCompletion = false; completeCallCounting = true; } } if (!completeCallCounting) { nativeCodeVersionToOptimize = GetNextMethodToOptimize(); if (nativeCodeVersionToOptimize.IsNull()) { // Ran out of methods to JIT if (wasPendingCallCountingCompletion) { // If call counting completions are pending and delayed above for coalescing, complete call counting // now, as that will add more methods to be rejitted m_isPendingCallCountingCompletion = false; _ASSERTE(!m_recentlyRequestedCallCountingCompletion); completeCallCounting = true; } else { allMethodsJitted = true; break; } } } } _ASSERTE(completeCallCounting == !!nativeCodeVersionToOptimize.IsNull()); if (completeCallCounting) { EX_TRY { CallCountingManager::CompleteCallCounting(); } EX_CATCH { STRESS_LOG1(LF_TIEREDCOMPILATION, LL_WARNING, "TieredCompilationManager::DoBackgroundWork: " "Exception in CallCountingManager::CompleteCallCounting, hr=0x%x\n", GET_EXCEPTION()->GetHR()); } EX_END_CATCH(RethrowTerminalExceptions); continue; } OptimizeMethod(nativeCodeVersionToOptimize); ++jittedMethodCount; // Yield the thread periodically to give preference to possibly more important work QueryPerformanceCounter(&li); UINT64 currentTicks = li.QuadPart; if (currentTicks - startTicks < workDurationTicks) { previousTicks = currentTicks; continue; } if (currentTicks - previousTicks >= maxWorkDurationTicks) { // It's unlikely that one iteration above would have taken that long, more likely this thread got scheduled out for // a while, in which case there is no need to yield again. 
Discount the time taken for the previous iteration and // continue processing work. startTicks += currentTicks - previousTicks; previousTicks = currentTicks; continue; } if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled()) { UINT32 countOfMethodsToOptimize = m_countOfMethodsToOptimize; if (m_isPendingCallCountingCompletion) { countOfMethodsToOptimize += CallCountingManager::GetCountOfCodeVersionsPendingCompletion(); } ETW::CompilationLog::TieredCompilation::Runtime::SendBackgroundJitStop(countOfMethodsToOptimize, jittedMethodCount); } UINT64 beforeSleepTicks = currentTicks; ClrSleepEx(0, false); QueryPerformanceCounter(&li); currentTicks = li.QuadPart; // Depending on how oversubscribed thread usage is on the system, the sleep may have caused this thread to not be // scheduled for a long time. Yielding the thread too frequently may significantly slow down the background work, which // may significantly delay how long it takes to reach steady-state performance. On the other hand, yielding the thread // too infrequently may cause the background work to monopolize the available CPU resources and prevent more important // foreground work from occurring. So the sleep duration is measured and for the next batch of background work, at least // a portion of that measured duration is used (within the min and max to keep things sensible). Since the background // work duration is capped to a maximum and since a long sleep delay is likely to repeat, to avoid going back to // too-frequent yielding too quickly, the background work duration is decayed back to the minimum if the sleep duration // becomes consistently short. UINT64 newWorkDurationTicks = (currentTicks - beforeSleepTicks) / 4; UINT64 decayedWorkDurationTicks = (workDurationTicks + workDurationTicks / 2) / 2; workDurationTicks = newWorkDurationTicks < decayedWorkDurationTicks ? decayedWorkDurationTicks : newWorkDurationTicks; if (workDurationTicks < minWorkDurationTicks) { workDurationTicks = minWorkDurationTicks; } else if (workDurationTicks > maxWorkDurationTicks) { workDurationTicks = maxWorkDurationTicks; } if (IsTieringDelayActive()) { sendStopEvent = false; break; } if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled()) { UINT32 countOfMethodsToOptimize = m_countOfMethodsToOptimize; if (m_isPendingCallCountingCompletion) { countOfMethodsToOptimize += CallCountingManager::GetCountOfCodeVersionsPendingCompletion(); } ETW::CompilationLog::TieredCompilation::Runtime::SendBackgroundJitStart(countOfMethodsToOptimize); } jittedMethodCount = 0; startTicks = previousTicks = currentTicks; } while (!IsTieringDelayActive()); if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled() && sendStopEvent) { UINT32 countOfMethodsToOptimize = m_countOfMethodsToOptimize; if (m_isPendingCallCountingCompletion) { countOfMethodsToOptimize += CallCountingManager::GetCountOfCodeVersionsPendingCompletion(); } ETW::CompilationLog::TieredCompilation::Runtime::SendBackgroundJitStop(countOfMethodsToOptimize, jittedMethodCount); } if (allMethodsJitted) { EX_TRY { CallCountingManager::StopAndDeleteAllCallCountingStubs(); } EX_CATCH { STRESS_LOG1(LF_TIEREDCOMPILATION, LL_WARNING, "TieredCompilationManager::DoBackgroundWork: " "Exception in CallCountingManager::StopAndDeleteAllCallCountingStubs, hr=0x%x\n", GET_EXCEPTION()->GetHR()); } EX_END_CATCH(RethrowTerminalExceptions); } *workDurationTicksRef = workDurationTicks; return allMethodsJitted; } // Jit compiles and installs new optimized code for a method. 
// Called on a background thread. void TieredCompilationManager::OptimizeMethod(NativeCodeVersion nativeCodeVersion) { STANDARD_VM_CONTRACT; _ASSERTE(nativeCodeVersion.GetMethodDesc()->IsEligibleForTieredCompilation()); if (CompileCodeVersion(nativeCodeVersion)) { ActivateCodeVersion(nativeCodeVersion); } } // Compiles new optimized code for a method. // Called on a background thread. BOOL TieredCompilationManager::CompileCodeVersion(NativeCodeVersion nativeCodeVersion) { STANDARD_VM_CONTRACT; PCODE pCode = NULL; MethodDesc* pMethod = nativeCodeVersion.GetMethodDesc(); EX_TRY { PrepareCodeConfigBuffer configBuffer(nativeCodeVersion); PrepareCodeConfig *config = configBuffer.GetConfig(); // This is a recompiling request which means the caller was // in COOP mode since the code already ran. _ASSERTE(!pMethod->HasUnmanagedCallersOnlyAttribute()); config->SetCallerGCMode(CallerGCMode::Coop); pCode = pMethod->PrepareCode(config); LOG((LF_TIEREDCOMPILATION, LL_INFO10000, "TieredCompilationManager::CompileCodeVersion Method=0x%pM (%s::%s), code version id=0x%x, code ptr=0x%p\n", pMethod, pMethod->m_pszDebugClassName, pMethod->m_pszDebugMethodName, nativeCodeVersion.GetVersionId(), pCode)); if (config->JitSwitchedToMinOpt()) { // The JIT decided to switch to min-opts, likely due to the method being very large or complex. The rejitted code // may be slower if the method had been prejitted. Ignore the rejitted code and continue using the tier 0 entry // point. // TODO: In the future, we should get some feedback from images containing pregenerated code and from tier 0 JIT // indicating that the method would not benefit from a rejit and avoid the rejit altogether. pCode = NULL; } } EX_CATCH { // Failing to jit should be rare but acceptable. We will leave whatever code already exists in place. STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "TieredCompilationManager::CompileCodeVersion: Method %pM failed to jit, hr=0x%x\n", pMethod, GET_EXCEPTION()->GetHR()); } EX_END_CATCH(RethrowTerminalExceptions) return pCode != NULL; } // Updates the MethodDesc and precode so that future invocations of a method will // execute the native code pointed to by pCode. // Called on a background thread. void TieredCompilationManager::ActivateCodeVersion(NativeCodeVersion nativeCodeVersion) { STANDARD_VM_CONTRACT; MethodDesc* pMethod = nativeCodeVersion.GetMethodDesc(); // If the ilParent version is active this will activate the native code version now. // Otherwise if the ilParent version becomes active again in the future the native // code version will activate then. ILCodeVersion ilParent; HRESULT hr = S_OK; { bool mayHaveEntryPointSlotsToBackpatch = pMethod->MayHaveEntryPointSlotsToBackpatch(); MethodDescBackpatchInfoTracker::ConditionalLockHolderForGCCoop slotBackpatchLockHolder( mayHaveEntryPointSlotsToBackpatch); // Backpatching entry point slots requires cooperative GC mode, see // MethodDescBackpatchInfoTracker::Backpatch_Locked(). The code version manager's table lock is an unsafe lock that // may be taken in any GC mode. The lock is taken in cooperative GC mode on some other paths, so the same ordering // must be used here to prevent deadlock. 
GCX_MAYBE_COOP(mayHaveEntryPointSlotsToBackpatch); CodeVersionManager::LockHolder codeVersioningLockHolder; // As long as we are exclusively using any non-JumpStamp publishing for tiered compilation // methods this first attempt should succeed ilParent = nativeCodeVersion.GetILCodeVersion(); hr = ilParent.SetActiveNativeCodeVersion(nativeCodeVersion); LOG((LF_TIEREDCOMPILATION, LL_INFO10000, "TieredCompilationManager::ActivateCodeVersion Method=0x%pM (%s::%s), code version id=0x%x. SetActiveNativeCodeVersion ret=0x%x\n", pMethod, pMethod->m_pszDebugClassName, pMethod->m_pszDebugMethodName, nativeCodeVersion.GetVersionId(), hr)); } if (FAILED(hr)) { STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "TieredCompilationManager::ActivateCodeVersion: " "Method %pM failed to publish native code for native code version %d\n", pMethod, nativeCodeVersion.GetVersionId()); } } // Dequeues the next method in the optmization queue. // This runs on the background thread. NativeCodeVersion TieredCompilationManager::GetNextMethodToOptimize() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(IsLockOwnedByCurrentThread()); SListElem<NativeCodeVersion>* pElem = m_methodsToOptimize.RemoveHead(); if (pElem != NULL) { NativeCodeVersion nativeCodeVersion = pElem->GetValue(); delete pElem; _ASSERTE(m_countOfMethodsToOptimize != 0); --m_countOfMethodsToOptimize; return nativeCodeVersion; } return NativeCodeVersion(); } //static CORJIT_FLAGS TieredCompilationManager::GetJitFlags(PrepareCodeConfig *config) { WRAPPER_NO_CONTRACT; _ASSERTE(config != nullptr); _ASSERTE( !config->WasTieringDisabledBeforeJitting() || config->GetCodeVersion().GetOptimizationTier() != NativeCodeVersion::OptimizationTier0); CORJIT_FLAGS flags; // Determine the optimization tier for the default code version (slightly faster common path during startup compared to // below), and disable call counting and set the optimization tier if it's not going to be tier 0 (this is used in other // places for the default code version where necessary to avoid the extra expense of GetOptimizationTier()). 
NativeCodeVersion nativeCodeVersion = config->GetCodeVersion(); if (nativeCodeVersion.IsDefaultVersion() && !config->WasTieringDisabledBeforeJitting()) { MethodDesc *methodDesc = nativeCodeVersion.GetMethodDesc(); if (!methodDesc->IsEligibleForTieredCompilation()) { _ASSERTE(nativeCodeVersion.GetOptimizationTier() == NativeCodeVersion::OptimizationTierOptimized); #ifdef FEATURE_INTERPRETER flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE); #endif return flags; } NativeCodeVersion::OptimizationTier newOptimizationTier; if (!methodDesc->RequestedAggressiveOptimization()) { if (g_pConfig->TieredCompilation_QuickJit()) { _ASSERTE(nativeCodeVersion.GetOptimizationTier() == NativeCodeVersion::OptimizationTier0); flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER0); return flags; } newOptimizationTier = NativeCodeVersion::OptimizationTierOptimized; } else { newOptimizationTier = NativeCodeVersion::OptimizationTier1; flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER1); } methodDesc->GetLoaderAllocator()->GetCallCountingManager()->DisableCallCounting(nativeCodeVersion); nativeCodeVersion.SetOptimizationTier(newOptimizationTier); #ifdef FEATURE_INTERPRETER flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE); #endif return flags; } switch (nativeCodeVersion.GetOptimizationTier()) { case NativeCodeVersion::OptimizationTier0: if (g_pConfig->TieredCompilation_QuickJit()) { flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER0); break; } nativeCodeVersion.SetOptimizationTier(NativeCodeVersion::OptimizationTierOptimized); goto Optimized; #ifdef FEATURE_ON_STACK_REPLACEMENT case NativeCodeVersion::OptimizationTier1OSR: flags.Set(CORJIT_FLAGS::CORJIT_FLAG_OSR); FALLTHROUGH; #endif case NativeCodeVersion::OptimizationTier1: flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER1); FALLTHROUGH; case NativeCodeVersion::OptimizationTierOptimized: Optimized: #ifdef FEATURE_INTERPRETER flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE); #endif break; default: UNREACHABLE(); } return flags; } #ifdef _DEBUG bool TieredCompilationManager::IsLockOwnedByCurrentThread() { WRAPPER_NO_CONTRACT; return !!s_lock.OwnedByCurrentThread(); } #endif // _DEBUG #endif // FEATURE_TIERED_COMPILATION && !DACCESS_COMPILE
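The pairing of m_isPendingCallCountingCompletion and m_recentlyRequestedCallCountingCompletion implements a simple burst-coalescing handshake between foreground threads and the background worker. Below is a minimal sketch of that handshake under my own type and member names; it is not the runtime's code and it omits all locking and contracts.

// Foreground side sets both flags when a method crosses its call-count threshold.
// Background side only completes call counting once no new request has arrived
// since the last time it checked, which coalesces bursts of requests.
struct CallCountingCoalescer
{
    bool pending = false;
    bool recentlyRequested = false;

    void Request()                 // called when a method hits the threshold
    {
        pending = true;
        recentlyRequested = true;
    }

    bool ShouldCompleteNow()       // polled by the background worker
    {
        if (!pending)
            return false;
        if (recentlyRequested)
        {
            recentlyRequested = false;   // a burst may still be in progress; wait one more round
            return false;
        }
        pending = false;
        return true;
    }
};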
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/gc/vxsort/vxsort_targets_enable_avx512.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifdef __GNUC__ #ifdef __clang__ #pragma clang attribute push (__attribute__((target("avx512f,avx512dq"))), apply_to = any(function)) #else #pragma GCC push_options #pragma GCC target("avx512f,avx512dq") #endif #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifdef __GNUC__ #ifdef __clang__ #pragma clang attribute push (__attribute__((target("avx512f,avx512dq"))), apply_to = any(function)) #else #pragma GCC push_options #pragma GCC target("avx512f,avx512dq") #endif #endif
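These pragmas only push target options; presumably a matching "disable" header pops them again once the AVX-512 function definitions end. A hedged, GCC-only illustration of how such a push/pop bracket is typically used is sketched below — the DoubleLanes helper is mine, not repo code, and Clang would use #pragma clang attribute pop for the corresponding pop.

#if defined(__GNUC__) && !defined(__clang__) && defined(__x86_64__)
#pragma GCC push_options
#pragma GCC target("avx512f,avx512dq")

#include <immintrin.h>

// Compiled with AVX-512 code generation even if the rest of the translation unit is not.
static inline __m512i DoubleLanes(__m512i v)
{
    return _mm512_add_epi64(v, v);
}

#pragma GCC pop_options
#endif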
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/vm/gccover.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __GCCOVER_H__ #define __GCCOVER_H__ #ifdef HAVE_GCCOVER /****************************************************************************/ /* GCCOverageInfo holds the state of which instructions have been visited by a GC and which ones have not */ #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4200 ) // zero-sized array #endif // _MSC_VER class GCCoverageInfo { public: IJitManager::MethodRegionInfo methodRegion; BYTE* curInstr; // The last instruction that was able to execute // Following 6 variables are for prolog / epilog walking coverage ICodeManager* codeMan; // CodeMan for this method GCInfoToken gcInfoToken; // gcInfo for this method Thread* callerThread; // Thread associated with context callerRegs T_CONTEXT callerRegs; // register state when method was entered unsigned gcCount; // GC count at the time we caputured the regs bool doingEpilogChecks; // are we doing epilog unwind checks? (do we care about callerRegs?) enum { hasExecutedSize = 4 }; unsigned hasExecuted[hasExecutedSize]; unsigned totalCount; union { BYTE savedCode[0]; // really variable sized // Note that DAC doesn't marshal the entire byte array automatically. // Any client of this field needs to get the TADDR of this field and // marshal over the bytes properly. }; // Sloppy bitsets (will wrap, and not threadsafe) but best effort is OK // since we just need half decent coverage. BOOL IsBitSetForOffset(unsigned offset) { unsigned dword = hasExecuted[(offset >> 5) % hasExecutedSize]; return(dword & (1 << (offset & 0x1F))); } void SetBitForOffset(unsigned offset) { unsigned* dword = &hasExecuted[(offset >> 5) % hasExecutedSize]; *dword |= (1 << (offset & 0x1F)) ; } void SprinkleBreakpoints(BYTE * saveAddr, PCODE codeStart, size_t codeSize, size_t regionOffsetAdj, BOOL fZapped); }; typedef DPTR(GCCoverageInfo) PTR_GCCoverageInfo; // see code:GCCoverageInfo::savedCode #ifdef _MSC_VER #pragma warning(pop) #endif // _MSC_VER #if defined(TARGET_X86) || defined(TARGET_AMD64) #define INTERRUPT_INSTR 0xF4 // X86 HLT instruction (any 1 byte illegal instruction will do) #define INTERRUPT_INSTR_CALL 0xFA // X86 CLI instruction #define INTERRUPT_INSTR_PROTECT_FIRST_RET 0xFB // X86 STI instruction, protect the first return register #define INTERRUPT_INSTR_PROTECT_SECOND_RET 0xEC // X86 IN instruction, protect the second return register #define INTERRUPT_INSTR_PROTECT_BOTH_RET 0xED // X86 IN instruction, protect both return registers #elif defined(TARGET_ARM) // 16-bit illegal instructions which will cause exception and cause // control to go to GcStress codepath #define INTERRUPT_INSTR 0xde00 #define INTERRUPT_INSTR_CALL 0xde03 // 0xde01 generates SIGTRAP (breakpoint) instead of SIGILL on Unix #define INTERRUPT_INSTR_PROTECT_RET 0xde02 // 32-bit illegal instructions. It is necessary to replace a 16-bit instruction // with a 16-bit illegal instruction, and a 32-bit instruction with a 32-bit // illegal instruction, to make GC stress with the "IT" instruction work, since // it counts the number of instructions that follow it, so we can't change that // number by replacing a 32-bit instruction with a 16-bit illegal instruction // followed by 16 bits of junk that might end up being a legal instruction. // Use the "Permanently UNDEFINED" section in the "ARM Architecture Reference Manual", // section A6.3.4 "Branches and miscellaneous control" table. 
// Note that we write these as a single 32-bit write, not two 16-bit writes, so the values // need to be arranged as the ARM decoder wants them, with the high-order halfword first // (in little-endian order). #define INTERRUPT_INSTR_32 0xa001f7f0 // 0xf7f0a001 #define INTERRUPT_INSTR_CALL_32 0xa002f7f0 // 0xf7f0a002 #define INTERRUPT_INSTR_PROTECT_RET_32 0xa003f7f0 // 0xf7f0a003 #elif defined(TARGET_ARM64) // The following encodings are undefined. They fall into section C4.5.8 - Data processing (2 source) of // "Arm Architecture Reference Manual ARMv8" // #define INTERRUPT_INSTR 0xBADC0DE0 #define INTERRUPT_INSTR_CALL 0xBADC0DE1 #define INTERRUPT_INSTR_PROTECT_RET 0xBADC0DE2 #endif // _TARGET_* // The body of this method is in this header file to allow // mscordaccore.dll to link without getting an unsat symbol // inline bool IsGcCoverageInterruptInstructionVal(UINT32 instrVal) { #if defined(TARGET_ARM64) switch (instrVal) { case INTERRUPT_INSTR: case INTERRUPT_INSTR_CALL: case INTERRUPT_INSTR_PROTECT_RET: return true; default: return false; } #elif defined(TARGET_ARM) UINT16 instrVal16 = static_cast<UINT16>(instrVal); size_t instrLen = GetARMInstructionLength(instrVal16); if (instrLen == 2) { switch (instrVal16) { case INTERRUPT_INSTR: case INTERRUPT_INSTR_CALL: case INTERRUPT_INSTR_PROTECT_RET: return true; default: return false; } } else { _ASSERTE(instrLen == 4); switch (instrVal) { case INTERRUPT_INSTR_32: case INTERRUPT_INSTR_CALL_32: case INTERRUPT_INSTR_PROTECT_RET_32: return true; default: return false; } } #else // x64 and x86 switch (instrVal) { case INTERRUPT_INSTR: case INTERRUPT_INSTR_CALL: case INTERRUPT_INSTR_PROTECT_FIRST_RET: case INTERRUPT_INSTR_PROTECT_SECOND_RET: case INTERRUPT_INSTR_PROTECT_BOTH_RET: return true; default: return false; } #endif // _TARGET_XXXX_ } bool IsGcCoverageInterruptInstruction(PBYTE instrPtr); bool IsGcCoverageInterrupt(LPVOID ip); #endif // HAVE_GCCOVER #endif // !__GCCOVER_H__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __GCCOVER_H__ #define __GCCOVER_H__ #ifdef HAVE_GCCOVER /****************************************************************************/ /* GCCOverageInfo holds the state of which instructions have been visited by a GC and which ones have not */ #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4200 ) // zero-sized array #endif // _MSC_VER class GCCoverageInfo { public: IJitManager::MethodRegionInfo methodRegion; BYTE* curInstr; // The last instruction that was able to execute // Following 6 variables are for prolog / epilog walking coverage ICodeManager* codeMan; // CodeMan for this method GCInfoToken gcInfoToken; // gcInfo for this method Thread* callerThread; // Thread associated with context callerRegs T_CONTEXT callerRegs; // register state when method was entered unsigned gcCount; // GC count at the time we caputured the regs bool doingEpilogChecks; // are we doing epilog unwind checks? (do we care about callerRegs?) enum { hasExecutedSize = 4 }; unsigned hasExecuted[hasExecutedSize]; unsigned totalCount; union { BYTE savedCode[0]; // really variable sized // Note that DAC doesn't marshal the entire byte array automatically. // Any client of this field needs to get the TADDR of this field and // marshal over the bytes properly. }; // Sloppy bitsets (will wrap, and not threadsafe) but best effort is OK // since we just need half decent coverage. BOOL IsBitSetForOffset(unsigned offset) { unsigned dword = hasExecuted[(offset >> 5) % hasExecutedSize]; return(dword & (1 << (offset & 0x1F))); } void SetBitForOffset(unsigned offset) { unsigned* dword = &hasExecuted[(offset >> 5) % hasExecutedSize]; *dword |= (1 << (offset & 0x1F)) ; } void SprinkleBreakpoints(BYTE * saveAddr, PCODE codeStart, size_t codeSize, size_t regionOffsetAdj, BOOL fZapped); }; typedef DPTR(GCCoverageInfo) PTR_GCCoverageInfo; // see code:GCCoverageInfo::savedCode #ifdef _MSC_VER #pragma warning(pop) #endif // _MSC_VER #if defined(TARGET_X86) || defined(TARGET_AMD64) #define INTERRUPT_INSTR 0xF4 // X86 HLT instruction (any 1 byte illegal instruction will do) #define INTERRUPT_INSTR_CALL 0xFA // X86 CLI instruction #define INTERRUPT_INSTR_PROTECT_FIRST_RET 0xFB // X86 STI instruction, protect the first return register #define INTERRUPT_INSTR_PROTECT_SECOND_RET 0xEC // X86 IN instruction, protect the second return register #define INTERRUPT_INSTR_PROTECT_BOTH_RET 0xED // X86 IN instruction, protect both return registers #elif defined(TARGET_ARM) // 16-bit illegal instructions which will cause exception and cause // control to go to GcStress codepath #define INTERRUPT_INSTR 0xde00 #define INTERRUPT_INSTR_CALL 0xde03 // 0xde01 generates SIGTRAP (breakpoint) instead of SIGILL on Unix #define INTERRUPT_INSTR_PROTECT_RET 0xde02 // 32-bit illegal instructions. It is necessary to replace a 16-bit instruction // with a 16-bit illegal instruction, and a 32-bit instruction with a 32-bit // illegal instruction, to make GC stress with the "IT" instruction work, since // it counts the number of instructions that follow it, so we can't change that // number by replacing a 32-bit instruction with a 16-bit illegal instruction // followed by 16 bits of junk that might end up being a legal instruction. // Use the "Permanently UNDEFINED" section in the "ARM Architecture Reference Manual", // section A6.3.4 "Branches and miscellaneous control" table. 
// Note that we write these as a single 32-bit write, not two 16-bit writes, so the values // need to be arranged as the ARM decoder wants them, with the high-order halfword first // (in little-endian order). #define INTERRUPT_INSTR_32 0xa001f7f0 // 0xf7f0a001 #define INTERRUPT_INSTR_CALL_32 0xa002f7f0 // 0xf7f0a002 #define INTERRUPT_INSTR_PROTECT_RET_32 0xa003f7f0 // 0xf7f0a003 #elif defined(TARGET_ARM64) // The following encodings are undefined. They fall into section C4.5.8 - Data processing (2 source) of // "Arm Architecture Reference Manual ARMv8" // #define INTERRUPT_INSTR 0xBADC0DE0 #define INTERRUPT_INSTR_CALL 0xBADC0DE1 #define INTERRUPT_INSTR_PROTECT_RET 0xBADC0DE2 #endif // _TARGET_* // The body of this method is in this header file to allow // mscordaccore.dll to link without getting an unsat symbol // inline bool IsGcCoverageInterruptInstructionVal(UINT32 instrVal) { #if defined(TARGET_ARM64) switch (instrVal) { case INTERRUPT_INSTR: case INTERRUPT_INSTR_CALL: case INTERRUPT_INSTR_PROTECT_RET: return true; default: return false; } #elif defined(TARGET_ARM) UINT16 instrVal16 = static_cast<UINT16>(instrVal); size_t instrLen = GetARMInstructionLength(instrVal16); if (instrLen == 2) { switch (instrVal16) { case INTERRUPT_INSTR: case INTERRUPT_INSTR_CALL: case INTERRUPT_INSTR_PROTECT_RET: return true; default: return false; } } else { _ASSERTE(instrLen == 4); switch (instrVal) { case INTERRUPT_INSTR_32: case INTERRUPT_INSTR_CALL_32: case INTERRUPT_INSTR_PROTECT_RET_32: return true; default: return false; } } #else // x64 and x86 switch (instrVal) { case INTERRUPT_INSTR: case INTERRUPT_INSTR_CALL: case INTERRUPT_INSTR_PROTECT_FIRST_RET: case INTERRUPT_INSTR_PROTECT_SECOND_RET: case INTERRUPT_INSTR_PROTECT_BOTH_RET: return true; default: return false; } #endif // _TARGET_XXXX_ } bool IsGcCoverageInterruptInstruction(PBYTE instrPtr); bool IsGcCoverageInterrupt(LPVOID ip); #endif // HAVE_GCCOVER #endif // !__GCCOVER_H__
-1
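The gccover.h text in this row notes that the 32-bit Thumb-2 breakpoint values are written as a single 32-bit store, with the high-order halfword arranged first so the ARM decoder sees the halfwords in the expected order. A minimal standalone C# sketch (not part of the runtime; it only assumes a little-endian host) showing how the INTERRUPT_INSTR_32 value 0xa001f7f0 lays out as the halfword pair 0xf7f0, 0xa001:

```csharp
using System;

class ThumbEncodingDemo
{
    static void Main()
    {
        // 32-bit GC-stress breakpoint value quoted from gccover.h (INTERRUPT_INSTR_32).
        const uint interruptInstr32 = 0xa001f7f0;

        // A single little-endian 32-bit write lays the bytes out as F0 F7 01 A0 ...
        byte[] bytes = BitConverter.GetBytes(interruptInstr32);
        Console.WriteLine(BitConverter.ToString(bytes)); // F0-F7-01-A0 on little-endian hosts

        // ... which a Thumb-2 decoder reads as two halfwords: 0xf7f0 then 0xa001,
        // i.e. the "permanently undefined" encoding the header comment describes.
        ushort first = BitConverter.ToUInt16(bytes, 0);
        ushort second = BitConverter.ToUInt16(bytes, 2);
        Console.WriteLine($"{first:x4} {second:x4}"); // f7f0 a001
    }
}
```

The demo only makes the byte layout visible; the runtime itself patches raw code bytes in place.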
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/tests/Interop/COM/NativeServer/ColorTesting.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "Servers.h" #define RED RGB(0xFF, 0x00, 0x00) class ColorTesting : public UnknownImpl, public IColorTesting { public: // IColorTesting DEF_FUNC(AreColorsEqual)( _In_ OLE_COLOR managed, _In_ OLE_COLOR native, _Out_ BOOL* areEqual ) { *areEqual = (managed == native ? TRUE : FALSE); return S_OK; } DEF_FUNC(GetRed)( _Out_ OLE_COLOR* color ) { *color = RED; return S_OK; } public: // IUnknown STDMETHOD(QueryInterface)( /* [in] */ REFIID riid, /* [iid_is][out] */ _COM_Outptr_ void __RPC_FAR *__RPC_FAR *ppvObject) { return DoQueryInterface(riid, ppvObject, static_cast<IColorTesting *>(this)); } DEFINE_REF_COUNTING(); };
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "Servers.h" #define RED RGB(0xFF, 0x00, 0x00) class ColorTesting : public UnknownImpl, public IColorTesting { public: // IColorTesting DEF_FUNC(AreColorsEqual)( _In_ OLE_COLOR managed, _In_ OLE_COLOR native, _Out_ BOOL* areEqual ) { *areEqual = (managed == native ? TRUE : FALSE); return S_OK; } DEF_FUNC(GetRed)( _Out_ OLE_COLOR* color ) { *color = RED; return S_OK; } public: // IUnknown STDMETHOD(QueryInterface)( /* [in] */ REFIID riid, /* [iid_is][out] */ _COM_Outptr_ void __RPC_FAR *__RPC_FAR *ppvObject) { return DoQueryInterface(riid, ppvObject, static_cast<IColorTesting *>(this)); } DEFINE_REF_COUNTING(); };
-1
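ColorTesting.h builds RED with the Win32 RGB macro, and AreColorsEqual is a plain integer comparison of the two OLE_COLOR values. As a rough illustration of the value being compared, here is a small C# sketch of the same packing (red in the low byte, giving a 0x00BBGGRR layout); the Rgb helper is a stand-in for the native macro, not an API from the test.

```csharp
using System;

class OleColorDemo
{
    // Stand-in for the Win32 RGB macro: red in the low byte, green next, blue above that.
    static uint Rgb(byte r, byte g, byte b) => (uint)(r | (g << 8) | (b << 16));

    static void Main()
    {
        uint red = Rgb(0xFF, 0x00, 0x00);
        Console.WriteLine($"0x{red:X8}");           // 0x000000FF, the value behind the RED constant
        Console.WriteLine(red == Rgb(0xFF, 0, 0));  // AreColorsEqual boils down to this comparison
    }
}
```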
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/tests/baseservices/compilerservices/dynamicobjectproperties/dev10_535767.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // Basic test for dependent handles. // // Note that though this test uses ConditionalWeakTable it is not a test for that class. This is a stress // test that utilizes ConditionalWeakTable features, which would be used heavily if Dynamic Language Runtime // catches on. // // Basic test overview: // * Allocate an array of objects (we call these Nodes) with finalizers. // * Create a set of dependent handles that reference these objects as primary and secondary members (this is // where ConditionalWeakTable comes in, adding a key/value pair to such a table creates a dependent handle // with the primary set to the key and the secondary set to the value). // * Null out selected objects from the array in various patterns. This removes the only normal strong root // for such objects (leaving only the dependent handles to provide additional roots). // * Perform a full GC and wait for it and finalization to complete. Each object which is collected will use // its finalizer to inform the test that it's been disposed of. // * Run our own reachability analysis (a simple mark array approach) to build a picture of which objects in // the array should have been collected or not. // * Validate that the actual set of live objects matches our computed live set exactly. // // Test variations include the number of objects allocated, the relationship between the primary and secondary // in each handle we allocate and the pattern with which we null out object references in the array. // // Additionally this test stresses substantially more complex code paths in the GC if server mode is enabled. // This can be achieved by setting the environment variable COMPlus_BuildFlavor=svr prior to executing the // test executable. // // Note that we don't go to any lengths to ensure that dependent handle ownership is spread over multiple cpus // on a server GC/MP test run. For large node counts (e.g. 100000) this happens naturally since initialization // takes a while with multiple thread/CPU switches involved. We could be more explicit here (allocate handles // using CPU affinitized threads) but if we do that we'd probably better look into different patterns of node // ownership to avoid unintentionally restricting our test coverage. // // Another area into which we could look deeper is trying to force mark stack overflows in the GC (presumably // by allocating complex object graphs with lots of interconnections, though I don't the specifics of the best // way to force this). Causing mark stack overflows should open up a class of bug the old dependent handle // implementation was subject to without requiring server GC mode or multiple CPUs. // using System; using System.Runtime.CompilerServices; // How we assign nodes to dependent handles. enum TableStyle { Unconnected, // The primary and secondary handles are assigned completely disjoint objects ForwardLinked, // The primary of each handle is the secondary of the previous handle BackwardLinked, // The primary of each handle is the secondary of the next handle Random // The primaries are each object in sequence, the secondaries are selected randomly from // the same set } // How we choose object references in the array to null out (and thus potentially become collected). 
enum CollectStyle { None, // Don't null out any (nothing should be collected) All, // Null them all out (any remaining live objects should be collected) Alternate, // Null out every second reference Random // Null out each entry with a 50% probability } // We report errors by throwing an exception. Define our own Exception subclass so we can identify these // errors unambiguously. class TestException : Exception { // We just supply a simple message string on error. public TestException(string message) : base(message) { } } // Class encapsulating test runs over a set of objects/handles allocated with the specified TableStyle. class TestSet { // Create a new test with the given table style and object count. public TestSet(TableStyle ts, int count) { // Use one random number generator for the life of the test. Could support explicit seeds for // reproducible tests here. m_rng = new Random(); // Remember our parameters. m_count = count; m_style = ts; // Various arrays. m_nodes = new Node[count]; // The array of objects m_collected = new bool[count]; // Records whether each object has been collected (entries are set by // the finalizer on Node) m_marks = new bool[count]; // Array used during individual test runs to calculate whether each // object should still be alive (allocated once here to avoid // injecting further garbage collections at run time) // Allocate each object (Node). Each knows its own unique ID (the index into the node array) and has a // back pointer to this test object (so it can phone home to report its own collection at finalization // time). for (int i = 0; i < count; i++) m_nodes[i] = new Node(this, i); // Determine how many handles we need to allocate given the number of nodes. This varies based on the // table style. switch (ts) { case TableStyle.Unconnected: // Primaries and secondaries are completely different objects so we split our nodes in half and // allocate that many handles. m_handleCount = count / 2; break; case TableStyle.ForwardLinked: // Nodes are primaries in one handle and secondary in another except one that falls off the end. // So we have as many handles as nodes - 1. m_handleCount = count - 1; break; case TableStyle.BackwardLinked: // Nodes are primaries in one handle and secondary in another except one that falls off the end. // So we have as many handles as nodes - 1. m_handleCount = count - 1; break; case TableStyle.Random: // Each node is a primary in some handle (secondaries are selected from amongst all the same nodes // randomly). So we have as many nodes as handles. m_handleCount = count; break; } // Allocate an array of HandleSpecs. These aren't the real handles, just structures that allow us // remember what's in each handle (in terms of the node index number for the primary and secondary). // We need to track this information separately because we can't access the real handles directly // (ConditionalWeakTable hides them) and we need to recall exactly what the primary and secondary of // each handle is so we can compute our own notion of object liveness later. m_handles = new HandleSpec[m_handleCount]; // Initialize the handle specs to assign objects to handles based on the table style. for (int i = 0; i < m_handleCount; i++) { int primary = -1, secondary = -1; switch (ts) { case TableStyle.Unconnected: // Assign adjacent nodes to the primary and secondary of each handle. primary = i * 2; secondary = (i * 2) + 1; break; case TableStyle.ForwardLinked: // Primary of each handle is the secondary of the last handle. 
primary = i; secondary = i + 1; break; case TableStyle.BackwardLinked: // Primary of each handle is the secondary of the next handle. primary = i + 1; secondary = i; break; case TableStyle.Random: // Primary is each node in sequence, secondary is any of the nodes randomly. primary = i; secondary = m_rng.Next(m_handleCount); break; } m_handles[i].Set(primary, secondary); } // Allocate a ConditionalWeakTable mapping Node keys to Node values. m_table = new ConditionalWeakTable<Node, Node>(); // Using our handle specs computed above add each primary/secondary node pair to the // ConditionalWeakTable in turn. This causes the ConditionalWeakTable to allocate a dependent handle // for each entry with the primary and secondary objects we specified as keys and values (note that // this scheme prevents us from creating multiple handles with the same primary though if this is // desired we could achieve it by allocating multiple ConditionalWeakTables). for (int i = 0; i < m_handleCount; i++) m_table.Add(m_nodes[m_handles[i].m_primary], m_nodes[m_handles[i].m_secondary]); } // Call this method to indicate a test error with a given message. This will terminate the test // immediately. void Error(string message) { throw new TestException(message); } // Run a single test pass on the node set. Null out node references according to the given CollectStyle, // run a garbage collection and then verify that each node is either live or dead as we predict. Take care // of the order in which test runs are made against a single TestSet: e.g. running a CollectStyle.All will // collect all nodes, rendering further runs relatively uninteresting. public void Run(CollectStyle cs) { Console.WriteLine("Running test TS:{0} CS:{1} {2} entries...", Enum.GetName(typeof(TableStyle), m_style), Enum.GetName(typeof(CollectStyle), cs), m_count); // Iterate over the array of nodes deciding for each whether to sever the reference (null out the // entry). for (int i = 0; i < m_count; i++) { bool sever = false; switch (cs) { case CollectStyle.All: // Sever all references. sever = true; break; case CollectStyle.None: // Don't sever any references. break; case CollectStyle.Alternate: // Sever every second reference (starting with the first). if ((i % 2) == 0) sever = true; break; case CollectStyle.Random: // Sever any reference with a 50% probability. if (m_rng.Next(100) > 50) sever = true; break; } if (sever) m_nodes[i] = null; } // Initialize a full GC and wait for all finalizers to complete (so we get an accurate picture of // which nodes were collected). GC.Collect(); GC.WaitForPendingFinalizers(); GC.WaitForPendingFinalizers(); // the above call may correspond to a GC prior to the Collect above, call it again // Calculate our own view of which nodes should be alive or dead. Use a simple mark array for this. // Once the algorithm is complete a true value at a given index in the array indicates a node that // should still be alive, otherwise the node should have been collected. // Initialize the mark array. Set true for nodes we still have a strong reference to from the array // (these should definitely not have been collected yet). Set false for the other nodes (we assume // they must have been collected until we prove otherwise). for (int i = 0; i < m_count; i++) m_marks[i] = m_nodes[i] != null; // Perform multiple passes over the handles we allocated (or our recorded version of the handles at // least). 
If we find a handle with a marked (live) primary where the secondary is not yet marked then // go ahead and mark that secondary (dependent handles are defined to do this: primaries act as if // they have a strong reference to the secondary up until the point they are collected). Repeat this // until we manage a scan over the entire table without marking any additional nodes as live. At this // point the marks array should reflect which objects are still live. while (true) { // Assume we're not going any further nodes to mark as live. bool marked = false; // Look at each handle in turn. for (int i = 0; i < m_handleCount; i++) if (m_marks[m_handles[i].m_primary]) { // Primary is live. if (!m_marks[m_handles[i].m_secondary]) { // Secondary wasn't marked as live yet. Do so and remember that we marked at least // node as live this pass (so we need to loop again since this secondary could be the // same as a primary earlier in the table). m_marks[m_handles[i].m_secondary] = true; marked = true; } } // Terminate the loop if we scanned the entire table without marking any additional nodes as live // (since additional scans can't make any difference). if (!marked) break; } // Validate our view of node liveness (m_marks) correspond to reality (m_nodes and m_collected). for (int i = 0; i < m_count; i++) { // Catch nodes which still have strong references but have collected anyway. This is stricly a // subset of the next test but it would be a very interesting bug to call out. if (m_nodes[i] != null && m_collected[i]) Error(String.Format("Node {0} was collected while it still had a strong root", i)); // Catch nodes which we compute as alive but have been collected. if (m_marks[i] && m_collected[i]) Error(String.Format("Node {0} was collected while it was still reachable", i)); // Catch nodes which we compute as dead but haven't been collected. if (!m_marks[i] && !m_collected[i]) Error(String.Format("Node {0} wasn't collected even though it was unreachable", i)); } } // Method called by nodes when they're finalized (i.e. the node has been collected). public void Collected(int id) { // Catch nodes which are collected twice. if (m_collected[id]) Error(String.Format("Node {0} collected twice", id)); m_collected[id] = true; } // Structure used to record the primary and secondary nodes in every dependent handle we allocated. Nodes // are identified by ID (their index into the node array). struct HandleSpec { public int m_primary; public int m_secondary; public void Set(int primary, int secondary) { m_primary = primary; m_secondary = secondary; } } int m_count; // Count of nodes in array TableStyle m_style; // Style of handle creation Node[] m_nodes; // Array of nodes bool[] m_collected; // Array indicating which nodes have been collected bool[] m_marks; // Array indicating which nodes should be live ConditionalWeakTable<Node, Node> m_table; // Table that creates and holds our dependent handles int m_handleCount; // Number of handles we create HandleSpec[] m_handles; // Array of descriptions of each handle Random m_rng; // Random number generator } // The type of object we reference from our dependent handles. Doesn't do much except report its own garbage // collection to the owning TestSet. class Node { // Allocate a node and remember our owner (TestSet) and ID (index into node array). public Node(TestSet owner, int id) { m_owner = owner; m_id = id; } // On finalization report our collection to the owner TestSet. 
~Node() { m_owner.Collected(m_id); } TestSet m_owner; // TestSet which created us int m_id; // Our index into above TestSet's node array } // The test class itself. class DhTest1 { // Entry point. public static int Main() { // The actual test runs are controlled from RunTest. True is returned if all succeeded, false // otherwise. if (new DhTest1().RunTest()) { Console.WriteLine("Test PASS"); return 100; } else { Console.WriteLine("Test FAIL"); return 999; } } // Run a series of tests with different table and collection styles. bool RunTest() { // Number of nodes we'll allocate in each run (we could take this as an argument instead). int numNodes = 10000; // Run everything under an exception handler since test errors are reported as exceptions. try { // Run a pass with each table style. For each style run through the collection styles in the order // None, Alternate, Random and All. This sequence is carefully selected to remove progressively // more nodes from the array (since, within a given TestSet instance, once a node has actually // been collected it won't be resurrected for future runs). TestSet ts1 = new TestSet(TableStyle.Unconnected, numNodes); ts1.Run(CollectStyle.None); ts1.Run(CollectStyle.Alternate); ts1.Run(CollectStyle.Random); ts1.Run(CollectStyle.All); TestSet ts2 = new TestSet(TableStyle.ForwardLinked, numNodes); ts2.Run(CollectStyle.None); ts2.Run(CollectStyle.Alternate); ts2.Run(CollectStyle.Random); ts2.Run(CollectStyle.All); TestSet ts3 = new TestSet(TableStyle.BackwardLinked, numNodes); ts3.Run(CollectStyle.None); ts3.Run(CollectStyle.Alternate); ts3.Run(CollectStyle.Random); ts3.Run(CollectStyle.All); TestSet ts4 = new TestSet(TableStyle.Random, numNodes); ts4.Run(CollectStyle.None); ts4.Run(CollectStyle.Alternate); ts4.Run(CollectStyle.Random); ts4.Run(CollectStyle.All); } catch (TestException te) { // "Expected" errors. Console.WriteLine("TestError: {0}", te.Message); return false; } catch (Exception e) { // Totally unexpected errors (probably shouldn't see these unless there's a test bug). Console.WriteLine("Unexpected exception: {0}", e.GetType().Name); return false; } // If we get as far as here the test succeeded. return true; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // Basic test for dependent handles. // // Note that though this test uses ConditionalWeakTable it is not a test for that class. This is a stress // test that utilizes ConditionalWeakTable features, which would be used heavily if Dynamic Language Runtime // catches on. // // Basic test overview: // * Allocate an array of objects (we call these Nodes) with finalizers. // * Create a set of dependent handles that reference these objects as primary and secondary members (this is // where ConditionalWeakTable comes in, adding a key/value pair to such a table creates a dependent handle // with the primary set to the key and the secondary set to the value). // * Null out selected objects from the array in various patterns. This removes the only normal strong root // for such objects (leaving only the dependent handles to provide additional roots). // * Perform a full GC and wait for it and finalization to complete. Each object which is collected will use // its finalizer to inform the test that it's been disposed of. // * Run our own reachability analysis (a simple mark array approach) to build a picture of which objects in // the array should have been collected or not. // * Validate that the actual set of live objects matches our computed live set exactly. // // Test variations include the number of objects allocated, the relationship between the primary and secondary // in each handle we allocate and the pattern with which we null out object references in the array. // // Additionally this test stresses substantially more complex code paths in the GC if server mode is enabled. // This can be achieved by setting the environment variable COMPlus_BuildFlavor=svr prior to executing the // test executable. // // Note that we don't go to any lengths to ensure that dependent handle ownership is spread over multiple cpus // on a server GC/MP test run. For large node counts (e.g. 100000) this happens naturally since initialization // takes a while with multiple thread/CPU switches involved. We could be more explicit here (allocate handles // using CPU affinitized threads) but if we do that we'd probably better look into different patterns of node // ownership to avoid unintentionally restricting our test coverage. // // Another area into which we could look deeper is trying to force mark stack overflows in the GC (presumably // by allocating complex object graphs with lots of interconnections, though I don't the specifics of the best // way to force this). Causing mark stack overflows should open up a class of bug the old dependent handle // implementation was subject to without requiring server GC mode or multiple CPUs. // using System; using System.Runtime.CompilerServices; // How we assign nodes to dependent handles. enum TableStyle { Unconnected, // The primary and secondary handles are assigned completely disjoint objects ForwardLinked, // The primary of each handle is the secondary of the previous handle BackwardLinked, // The primary of each handle is the secondary of the next handle Random // The primaries are each object in sequence, the secondaries are selected randomly from // the same set } // How we choose object references in the array to null out (and thus potentially become collected). 
enum CollectStyle { None, // Don't null out any (nothing should be collected) All, // Null them all out (any remaining live objects should be collected) Alternate, // Null out every second reference Random // Null out each entry with a 50% probability } // We report errors by throwing an exception. Define our own Exception subclass so we can identify these // errors unambiguously. class TestException : Exception { // We just supply a simple message string on error. public TestException(string message) : base(message) { } } // Class encapsulating test runs over a set of objects/handles allocated with the specified TableStyle. class TestSet { // Create a new test with the given table style and object count. public TestSet(TableStyle ts, int count) { // Use one random number generator for the life of the test. Could support explicit seeds for // reproducible tests here. m_rng = new Random(); // Remember our parameters. m_count = count; m_style = ts; // Various arrays. m_nodes = new Node[count]; // The array of objects m_collected = new bool[count]; // Records whether each object has been collected (entries are set by // the finalizer on Node) m_marks = new bool[count]; // Array used during individual test runs to calculate whether each // object should still be alive (allocated once here to avoid // injecting further garbage collections at run time) // Allocate each object (Node). Each knows its own unique ID (the index into the node array) and has a // back pointer to this test object (so it can phone home to report its own collection at finalization // time). for (int i = 0; i < count; i++) m_nodes[i] = new Node(this, i); // Determine how many handles we need to allocate given the number of nodes. This varies based on the // table style. switch (ts) { case TableStyle.Unconnected: // Primaries and secondaries are completely different objects so we split our nodes in half and // allocate that many handles. m_handleCount = count / 2; break; case TableStyle.ForwardLinked: // Nodes are primaries in one handle and secondary in another except one that falls off the end. // So we have as many handles as nodes - 1. m_handleCount = count - 1; break; case TableStyle.BackwardLinked: // Nodes are primaries in one handle and secondary in another except one that falls off the end. // So we have as many handles as nodes - 1. m_handleCount = count - 1; break; case TableStyle.Random: // Each node is a primary in some handle (secondaries are selected from amongst all the same nodes // randomly). So we have as many nodes as handles. m_handleCount = count; break; } // Allocate an array of HandleSpecs. These aren't the real handles, just structures that allow us // remember what's in each handle (in terms of the node index number for the primary and secondary). // We need to track this information separately because we can't access the real handles directly // (ConditionalWeakTable hides them) and we need to recall exactly what the primary and secondary of // each handle is so we can compute our own notion of object liveness later. m_handles = new HandleSpec[m_handleCount]; // Initialize the handle specs to assign objects to handles based on the table style. for (int i = 0; i < m_handleCount; i++) { int primary = -1, secondary = -1; switch (ts) { case TableStyle.Unconnected: // Assign adjacent nodes to the primary and secondary of each handle. primary = i * 2; secondary = (i * 2) + 1; break; case TableStyle.ForwardLinked: // Primary of each handle is the secondary of the last handle. 
primary = i; secondary = i + 1; break; case TableStyle.BackwardLinked: // Primary of each handle is the secondary of the next handle. primary = i + 1; secondary = i; break; case TableStyle.Random: // Primary is each node in sequence, secondary is any of the nodes randomly. primary = i; secondary = m_rng.Next(m_handleCount); break; } m_handles[i].Set(primary, secondary); } // Allocate a ConditionalWeakTable mapping Node keys to Node values. m_table = new ConditionalWeakTable<Node, Node>(); // Using our handle specs computed above add each primary/secondary node pair to the // ConditionalWeakTable in turn. This causes the ConditionalWeakTable to allocate a dependent handle // for each entry with the primary and secondary objects we specified as keys and values (note that // this scheme prevents us from creating multiple handles with the same primary though if this is // desired we could achieve it by allocating multiple ConditionalWeakTables). for (int i = 0; i < m_handleCount; i++) m_table.Add(m_nodes[m_handles[i].m_primary], m_nodes[m_handles[i].m_secondary]); } // Call this method to indicate a test error with a given message. This will terminate the test // immediately. void Error(string message) { throw new TestException(message); } // Run a single test pass on the node set. Null out node references according to the given CollectStyle, // run a garbage collection and then verify that each node is either live or dead as we predict. Take care // of the order in which test runs are made against a single TestSet: e.g. running a CollectStyle.All will // collect all nodes, rendering further runs relatively uninteresting. public void Run(CollectStyle cs) { Console.WriteLine("Running test TS:{0} CS:{1} {2} entries...", Enum.GetName(typeof(TableStyle), m_style), Enum.GetName(typeof(CollectStyle), cs), m_count); // Iterate over the array of nodes deciding for each whether to sever the reference (null out the // entry). for (int i = 0; i < m_count; i++) { bool sever = false; switch (cs) { case CollectStyle.All: // Sever all references. sever = true; break; case CollectStyle.None: // Don't sever any references. break; case CollectStyle.Alternate: // Sever every second reference (starting with the first). if ((i % 2) == 0) sever = true; break; case CollectStyle.Random: // Sever any reference with a 50% probability. if (m_rng.Next(100) > 50) sever = true; break; } if (sever) m_nodes[i] = null; } // Initialize a full GC and wait for all finalizers to complete (so we get an accurate picture of // which nodes were collected). GC.Collect(); GC.WaitForPendingFinalizers(); GC.WaitForPendingFinalizers(); // the above call may correspond to a GC prior to the Collect above, call it again // Calculate our own view of which nodes should be alive or dead. Use a simple mark array for this. // Once the algorithm is complete a true value at a given index in the array indicates a node that // should still be alive, otherwise the node should have been collected. // Initialize the mark array. Set true for nodes we still have a strong reference to from the array // (these should definitely not have been collected yet). Set false for the other nodes (we assume // they must have been collected until we prove otherwise). for (int i = 0; i < m_count; i++) m_marks[i] = m_nodes[i] != null; // Perform multiple passes over the handles we allocated (or our recorded version of the handles at // least). 
If we find a handle with a marked (live) primary where the secondary is not yet marked then // go ahead and mark that secondary (dependent handles are defined to do this: primaries act as if // they have a strong reference to the secondary up until the point they are collected). Repeat this // until we manage a scan over the entire table without marking any additional nodes as live. At this // point the marks array should reflect which objects are still live. while (true) { // Assume we're not going any further nodes to mark as live. bool marked = false; // Look at each handle in turn. for (int i = 0; i < m_handleCount; i++) if (m_marks[m_handles[i].m_primary]) { // Primary is live. if (!m_marks[m_handles[i].m_secondary]) { // Secondary wasn't marked as live yet. Do so and remember that we marked at least // node as live this pass (so we need to loop again since this secondary could be the // same as a primary earlier in the table). m_marks[m_handles[i].m_secondary] = true; marked = true; } } // Terminate the loop if we scanned the entire table without marking any additional nodes as live // (since additional scans can't make any difference). if (!marked) break; } // Validate our view of node liveness (m_marks) correspond to reality (m_nodes and m_collected). for (int i = 0; i < m_count; i++) { // Catch nodes which still have strong references but have collected anyway. This is stricly a // subset of the next test but it would be a very interesting bug to call out. if (m_nodes[i] != null && m_collected[i]) Error(String.Format("Node {0} was collected while it still had a strong root", i)); // Catch nodes which we compute as alive but have been collected. if (m_marks[i] && m_collected[i]) Error(String.Format("Node {0} was collected while it was still reachable", i)); // Catch nodes which we compute as dead but haven't been collected. if (!m_marks[i] && !m_collected[i]) Error(String.Format("Node {0} wasn't collected even though it was unreachable", i)); } } // Method called by nodes when they're finalized (i.e. the node has been collected). public void Collected(int id) { // Catch nodes which are collected twice. if (m_collected[id]) Error(String.Format("Node {0} collected twice", id)); m_collected[id] = true; } // Structure used to record the primary and secondary nodes in every dependent handle we allocated. Nodes // are identified by ID (their index into the node array). struct HandleSpec { public int m_primary; public int m_secondary; public void Set(int primary, int secondary) { m_primary = primary; m_secondary = secondary; } } int m_count; // Count of nodes in array TableStyle m_style; // Style of handle creation Node[] m_nodes; // Array of nodes bool[] m_collected; // Array indicating which nodes have been collected bool[] m_marks; // Array indicating which nodes should be live ConditionalWeakTable<Node, Node> m_table; // Table that creates and holds our dependent handles int m_handleCount; // Number of handles we create HandleSpec[] m_handles; // Array of descriptions of each handle Random m_rng; // Random number generator } // The type of object we reference from our dependent handles. Doesn't do much except report its own garbage // collection to the owning TestSet. class Node { // Allocate a node and remember our owner (TestSet) and ID (index into node array). public Node(TestSet owner, int id) { m_owner = owner; m_id = id; } // On finalization report our collection to the owner TestSet. 
~Node() { m_owner.Collected(m_id); } TestSet m_owner; // TestSet which created us int m_id; // Our index into above TestSet's node array } // The test class itself. class DhTest1 { // Entry point. public static int Main() { // The actual test runs are controlled from RunTest. True is returned if all succeeded, false // otherwise. if (new DhTest1().RunTest()) { Console.WriteLine("Test PASS"); return 100; } else { Console.WriteLine("Test FAIL"); return 999; } } // Run a series of tests with different table and collection styles. bool RunTest() { // Number of nodes we'll allocate in each run (we could take this as an argument instead). int numNodes = 10000; // Run everything under an exception handler since test errors are reported as exceptions. try { // Run a pass with each table style. For each style run through the collection styles in the order // None, Alternate, Random and All. This sequence is carefully selected to remove progressively // more nodes from the array (since, within a given TestSet instance, once a node has actually // been collected it won't be resurrected for future runs). TestSet ts1 = new TestSet(TableStyle.Unconnected, numNodes); ts1.Run(CollectStyle.None); ts1.Run(CollectStyle.Alternate); ts1.Run(CollectStyle.Random); ts1.Run(CollectStyle.All); TestSet ts2 = new TestSet(TableStyle.ForwardLinked, numNodes); ts2.Run(CollectStyle.None); ts2.Run(CollectStyle.Alternate); ts2.Run(CollectStyle.Random); ts2.Run(CollectStyle.All); TestSet ts3 = new TestSet(TableStyle.BackwardLinked, numNodes); ts3.Run(CollectStyle.None); ts3.Run(CollectStyle.Alternate); ts3.Run(CollectStyle.Random); ts3.Run(CollectStyle.All); TestSet ts4 = new TestSet(TableStyle.Random, numNodes); ts4.Run(CollectStyle.None); ts4.Run(CollectStyle.Alternate); ts4.Run(CollectStyle.Random); ts4.Run(CollectStyle.All); } catch (TestException te) { // "Expected" errors. Console.WriteLine("TestError: {0}", te.Message); return false; } catch (Exception e) { // Totally unexpected errors (probably shouldn't see these unless there's a test bug). Console.WriteLine("Unexpected exception: {0}", e.GetType().Name); return false; } // If we get as far as here the test succeeded. return true; } }
-1
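The dev10_535767.cs test in this row predicts liveness with a fixpoint loop over its recorded (primary, secondary) handle pairs: any secondary whose primary is marked gets marked too, and the scan repeats until a full pass marks nothing new. A compact C# sketch of that propagation in isolation (hypothetical helper names, not code taken from the test):

```csharp
using System;
using System.Collections.Generic;

static class DependentHandleMarking
{
    // Propagates reachability across (primary, secondary) pairs the same way the test's
    // while-loop does: a marked primary keeps its secondary alive, and the scan repeats
    // until one pass over the handles marks nothing new.
    static bool[] ComputeLiveSet(bool[] stronglyRooted, IReadOnlyList<(int Primary, int Secondary)> handles)
    {
        bool[] marks = (bool[])stronglyRooted.Clone();
        bool marked;
        do
        {
            marked = false;
            foreach (var (primary, secondary) in handles)
            {
                if (marks[primary] && !marks[secondary])
                {
                    marks[secondary] = true;
                    marked = true;   // this secondary may be a primary seen earlier, so scan again
                }
            }
        } while (marked);
        return marks;
    }

    static void Main()
    {
        // Three nodes: only node 0 has a strong root; handles chain 0 -> 1 -> 2.
        bool[] roots = { true, false, false };
        var handles = new[] { (0, 1), (1, 2) };
        Console.WriteLine(string.Join(", ", ComputeLiveSet(roots, handles))); // True, True, True
    }
}
```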
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/pal/tests/palsuite/file_io/SetEndOfFile/test4/setendoffile.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: setendoffile.c (test 4) ** ** Purpose: Tests the PAL implementation of the SetEndOfFile function. ** Verify that the file pointer is the same before ** and after a SetEndOfFile using SetFilePointer with ** FILE_BEGIN, FILE_CURRENT and FILE_END ** ** **===================================================================*/ #include <palsuite.h> #define szStringTest "The quick fox jumped over the lazy dog's back." #define szTextFile "test.tmp" static void Cleanup(HANDLE hFile) { if (!CloseHandle(hFile)) { Trace("SetEndOfFile: ERROR -> Unable to close file \"%s\". ", "GetLastError returned %u.\n", szTextFile, GetLastError()); } if (!DeleteFileA(szTextFile)) { Trace("SetEndOfFile: ERROR -> Unable to delete file \"%s\". ", "GetLastError returned %u.\n", szTextFile, GetLastError()); } } static void DoTest(HANDLE hFile, DWORD dwOffset, DWORD dwMethod) { DWORD dwFP1 = 0; DWORD dwFP2 = 0; DWORD dwError; /* set the pointer*/ dwFP1 = SetFilePointer(hFile, dwOffset, NULL, dwMethod); if ((dwFP1 == INVALID_SET_FILE_POINTER) && ((dwError = GetLastError()) != ERROR_SUCCESS)) { Trace("SetEndOfFile: ERROR -> Unable to set the pointer to the " "end of the file. GetLastError returned %u.\n", dwError); Cleanup(hFile); Fail(""); } /* set EOF */ if (!SetEndOfFile(hFile)) { Trace("SetEndOfFile: ERROR -> Unable to set end of file. " "GetLastError returned %u.\n", GetLastError()); Cleanup(hFile); Fail(""); } /* get current file pointer pointer */ dwFP2 = SetFilePointer(hFile, 0, NULL, FILE_CURRENT); if ((dwFP1 == INVALID_SET_FILE_POINTER) && ((dwError = GetLastError()) != ERROR_SUCCESS)) { Trace("SetEndOfFile: ERROR -> Unable to set the pointer to the " "end of the file. GetLastError returned %u.\n", dwError); Cleanup(hFile); Fail(""); } /* are they the same? */ if (dwFP1 != dwFP2) { Trace("SetEndOfFile: ERROR -> File pointer before (%u) the " "SetEndOfFile call was different than after (%u).\n", dwFP1, dwFP2); Cleanup(hFile); Fail(""); } } PALTEST(file_io_SetEndOfFile_test4_paltest_setendoffile_test4, "file_io/SetEndOfFile/test4/paltest_setendoffile_test4") { HANDLE hFile = NULL; DWORD dwBytesWritten; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } /* create a test file */ hFile = CreateFile(szTextFile, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if(hFile == INVALID_HANDLE_VALUE) { Fail("SetEndOfFile: ERROR -> Unable to create file \"%s\". " "GetLastError returned %u.\n", szTextFile, GetLastError()); } if (!WriteFile(hFile, szStringTest, strlen(szStringTest), &dwBytesWritten, NULL)) { Trace("SetEndOfFile: ERROR -> Unable to write to \"%s\". ", "GetLastError returned %u.\n", szTextFile, GetLastError()); Cleanup(hFile); Fail(""); } DoTest(hFile, -2, FILE_END); /* test the end */ DoTest(hFile, -10, FILE_CURRENT); /* test the middle-ish */ DoTest(hFile, 0, FILE_BEGIN); /* test the start */ Cleanup(hFile); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: setendoffile.c (test 4) ** ** Purpose: Tests the PAL implementation of the SetEndOfFile function. ** Verify that the file pointer is the same before ** and after a SetEndOfFile using SetFilePointer with ** FILE_BEGIN, FILE_CURRENT and FILE_END ** ** **===================================================================*/ #include <palsuite.h> #define szStringTest "The quick fox jumped over the lazy dog's back." #define szTextFile "test.tmp" static void Cleanup(HANDLE hFile) { if (!CloseHandle(hFile)) { Trace("SetEndOfFile: ERROR -> Unable to close file \"%s\". ", "GetLastError returned %u.\n", szTextFile, GetLastError()); } if (!DeleteFileA(szTextFile)) { Trace("SetEndOfFile: ERROR -> Unable to delete file \"%s\". ", "GetLastError returned %u.\n", szTextFile, GetLastError()); } } static void DoTest(HANDLE hFile, DWORD dwOffset, DWORD dwMethod) { DWORD dwFP1 = 0; DWORD dwFP2 = 0; DWORD dwError; /* set the pointer*/ dwFP1 = SetFilePointer(hFile, dwOffset, NULL, dwMethod); if ((dwFP1 == INVALID_SET_FILE_POINTER) && ((dwError = GetLastError()) != ERROR_SUCCESS)) { Trace("SetEndOfFile: ERROR -> Unable to set the pointer to the " "end of the file. GetLastError returned %u.\n", dwError); Cleanup(hFile); Fail(""); } /* set EOF */ if (!SetEndOfFile(hFile)) { Trace("SetEndOfFile: ERROR -> Unable to set end of file. " "GetLastError returned %u.\n", GetLastError()); Cleanup(hFile); Fail(""); } /* get current file pointer pointer */ dwFP2 = SetFilePointer(hFile, 0, NULL, FILE_CURRENT); if ((dwFP1 == INVALID_SET_FILE_POINTER) && ((dwError = GetLastError()) != ERROR_SUCCESS)) { Trace("SetEndOfFile: ERROR -> Unable to set the pointer to the " "end of the file. GetLastError returned %u.\n", dwError); Cleanup(hFile); Fail(""); } /* are they the same? */ if (dwFP1 != dwFP2) { Trace("SetEndOfFile: ERROR -> File pointer before (%u) the " "SetEndOfFile call was different than after (%u).\n", dwFP1, dwFP2); Cleanup(hFile); Fail(""); } } PALTEST(file_io_SetEndOfFile_test4_paltest_setendoffile_test4, "file_io/SetEndOfFile/test4/paltest_setendoffile_test4") { HANDLE hFile = NULL; DWORD dwBytesWritten; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } /* create a test file */ hFile = CreateFile(szTextFile, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if(hFile == INVALID_HANDLE_VALUE) { Fail("SetEndOfFile: ERROR -> Unable to create file \"%s\". " "GetLastError returned %u.\n", szTextFile, GetLastError()); } if (!WriteFile(hFile, szStringTest, strlen(szStringTest), &dwBytesWritten, NULL)) { Trace("SetEndOfFile: ERROR -> Unable to write to \"%s\". ", "GetLastError returned %u.\n", szTextFile, GetLastError()); Cleanup(hFile); Fail(""); } DoTest(hFile, -2, FILE_END); /* test the end */ DoTest(hFile, -10, FILE_CURRENT); /* test the middle-ish */ DoTest(hFile, 0, FILE_BEGIN); /* test the start */ Cleanup(hFile); PAL_Terminate(); return PASS; }
-1
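The PAL test in this row records the file pointer returned by SetFilePointer, truncates with SetEndOfFile, and then queries the current position again to confirm the two observations match. Below is a managed sketch of the same before/after observation using FileStream; it is illustrative only, with a made-up temp file name, while the real test drives the Win32 APIs directly.

```csharp
using System;
using System.IO;

class SetLengthPointerCheck
{
    static void Main()
    {
        string path = Path.Combine(Path.GetTempPath(), "setlength-demo.tmp");
        using (var fs = new FileStream(path, FileMode.Create, FileAccess.ReadWrite))
        {
            byte[] payload = System.Text.Encoding.ASCII.GetBytes(
                "The quick fox jumped over the lazy dog's back.");
            fs.Write(payload, 0, payload.Length);

            // Seek into the middle, note the position, truncate at that point, then read the
            // position again -- the same before/after check the PAL test performs with
            // SetFilePointer and SetEndOfFile.
            long before = fs.Seek(-10, SeekOrigin.End);
            fs.SetLength(fs.Position);
            long after = fs.Position;

            Console.WriteLine(before == after
                ? "file pointer unchanged by the truncation"
                : $"file pointer moved: {before} -> {after}");
        }
        File.Delete(path);
    }
}
```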
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/nativeaot/Runtime/event.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. class CLREventStatic { public: bool CreateManualEventNoThrow(bool bInitialState); bool CreateAutoEventNoThrow(bool bInitialState); bool CreateOSManualEventNoThrow(bool bInitialState); bool CreateOSAutoEventNoThrow(bool bInitialState); void CloseEvent(); bool IsValid() const; bool Set(); bool Reset(); uint32_t Wait(uint32_t dwMilliseconds, bool bAlertable, bool bAllowReentrantWait = false); HANDLE GetOSEvent(); private: HANDLE m_hEvent; bool m_fInitialized; };
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. class CLREventStatic { public: bool CreateManualEventNoThrow(bool bInitialState); bool CreateAutoEventNoThrow(bool bInitialState); bool CreateOSManualEventNoThrow(bool bInitialState); bool CreateOSAutoEventNoThrow(bool bInitialState); void CloseEvent(); bool IsValid() const; bool Set(); bool Reset(); uint32_t Wait(uint32_t dwMilliseconds, bool bAlertable, bool bAllowReentrantWait = false); HANDLE GetOSEvent(); private: HANDLE m_hEvent; bool m_fInitialized; };
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/tests/JIT/Regression/JitBlue/GitHub_14455/GitHub_14455.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/libraries/System.Security.Permissions/src/System/Data/Common/DBDataPermissionAttribute.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Security.Permissions; namespace System.Data.Common { #if NETCOREAPP [Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] #endif [AttributeUsage(AttributeTargets.Assembly | AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Constructor| AttributeTargets.Method, AllowMultiple =true, Inherited =false)] public abstract class DBDataPermissionAttribute : CodeAccessSecurityAttribute { protected DBDataPermissionAttribute(SecurityAction action) : base(action) { } public bool AllowBlankPassword { get; set; } public string ConnectionString { get; set; } public KeyRestrictionBehavior KeyRestrictionBehavior { get; set; } public string KeyRestrictions { get; set; } [System.ComponentModel.EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public bool ShouldSerializeConnectionString() { return false; } [System.ComponentModel.EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public bool ShouldSerializeKeyRestrictions() { return false; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Security.Permissions; namespace System.Data.Common { #if NETCOREAPP [Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] #endif [AttributeUsage(AttributeTargets.Assembly | AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Constructor| AttributeTargets.Method, AllowMultiple =true, Inherited =false)] public abstract class DBDataPermissionAttribute : CodeAccessSecurityAttribute { protected DBDataPermissionAttribute(SecurityAction action) : base(action) { } public bool AllowBlankPassword { get; set; } public string ConnectionString { get; set; } public KeyRestrictionBehavior KeyRestrictionBehavior { get; set; } public string KeyRestrictions { get; set; } [System.ComponentModel.EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public bool ShouldSerializeConnectionString() { return false; } [System.ComponentModel.EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public bool ShouldSerializeKeyRestrictions() { return false; } } }
-1
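DBDataPermissionAttribute is abstract, so providers are expected to derive from it. The sketch below uses a hypothetical DemoDataPermissionAttribute purely for illustration and assumes a project that references the System.Security.Permissions compatibility package; expect obsoletion warnings, since CAS attributes are inert on modern .NET.

```csharp
using System;
using System.Data.Common;
using System.Security;
using System.Security.Permissions;

// Hypothetical concrete subclass, only to make the abstract base usable here;
// real ADO.NET providers ship their own derived attribute types.
sealed class DemoDataPermissionAttribute : DBDataPermissionAttribute
{
    public DemoDataPermissionAttribute(SecurityAction action) : base(action) { }
    public override IPermission CreatePermission() => null; // CAS is a no-op on modern .NET
}

class Program
{
    static void Main()
    {
        var attr = new DemoDataPermissionAttribute(SecurityAction.Demand)
        {
            ConnectionString = "Data Source=.;Integrated Security=true",
            KeyRestrictions = "Pooling=",
            KeyRestrictionBehavior = KeyRestrictionBehavior.AllowOnly,
        };

        // The ShouldSerialize* hooks always report false, so designers never persist these values.
        Console.WriteLine(attr.ShouldSerializeConnectionString()); // False
        Console.WriteLine(attr.KeyRestrictionBehavior);            // AllowOnly
    }
}
```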
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/tests/JIT/jit64/mcc/interop/mcc_i05.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Runtime.Extensions { auto } .assembly extern xunit.core {} .assembly extern mscorlib { auto } .assembly 'mcc_i05' {} .namespace MCCTest { .class MyClass { .method assembly static pinvokeimpl("native_i0s" as "#1" stdcall) valuetype MCCTest.VType0 Sum(unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64) cil managed preservesig { } .method private static int32 Main(string[] args) { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 64 .locals init ( [0] valuetype MCCTest.VType0 res, [1] int32 rc ) ldc.i8 1 ldc.i8 2 ldc.i8 3 ldc.i8 4 ldc.i8 5 ldc.i8 6 ldc.i8 7 ldc.i8 8 ldc.i8 9 ldc.i8 10 ldc.i8 11 ldc.i8 12 ldftn valuetype MCCTest.VType0 MCCTest.MyClass::Sum(unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64) calli valuetype MCCTest.VType0(unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64) stloc.s res // Check Result ldloc.s res ldc.i4 12 call int32 MCCTest.Common::CheckResult(valuetype MCCTest.VType0, int32) stloc.s rc ldloc.s rc ret } // end of method MyClass::Main } // end of class MyClass } // end of namespace MCCTest
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Runtime.Extensions { auto } .assembly extern xunit.core {} .assembly extern mscorlib { auto } .assembly 'mcc_i05' {} .namespace MCCTest { .class MyClass { .method assembly static pinvokeimpl("native_i0s" as "#1" stdcall) valuetype MCCTest.VType0 Sum(unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64) cil managed preservesig { } .method private static int32 Main(string[] args) { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 64 .locals init ( [0] valuetype MCCTest.VType0 res, [1] int32 rc ) ldc.i8 1 ldc.i8 2 ldc.i8 3 ldc.i8 4 ldc.i8 5 ldc.i8 6 ldc.i8 7 ldc.i8 8 ldc.i8 9 ldc.i8 10 ldc.i8 11 ldc.i8 12 ldftn valuetype MCCTest.VType0 MCCTest.MyClass::Sum(unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64) calli valuetype MCCTest.VType0(unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64, unsigned int64) stloc.s res // Check Result ldloc.s res ldc.i4 12 call int32 MCCTest.Common::CheckResult(valuetype MCCTest.VType0, int32) stloc.s rc ldloc.s rc ret } // end of method MyClass::Main } // end of class MyClass } // end of namespace MCCTest
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/libraries/System.Text.Json/src/System/Text/Json/Serialization/JsonSerializerContext.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Text.Json.Serialization.Metadata; namespace System.Text.Json.Serialization { /// <summary> /// Provides metadata about a set of types that is relevant to JSON serialization. /// </summary> public abstract partial class JsonSerializerContext { private bool? _canUseSerializationLogic; internal JsonSerializerOptions? _options; /// <summary> /// Gets the run time specified options of the context. If no options were passed /// when instanciating the context, then a new instance is bound and returned. /// </summary> /// <remarks> /// The instance cannot be mutated once it is bound with the context instance. /// </remarks> public JsonSerializerOptions Options => _options ??= new JsonSerializerOptions { JsonSerializerContext = this }; /// <summary> /// Indicates whether pre-generated serialization logic for types in the context /// is compatible with the run time specified <see cref="JsonSerializerOptions"/>. /// </summary> internal bool CanUseSerializationLogic { get { if (!_canUseSerializationLogic.HasValue) { if (GeneratedSerializerOptions == null) { _canUseSerializationLogic = false; } else { _canUseSerializationLogic = // Guard against unsupported features Options.Converters.Count == 0 && Options.Encoder == null && // Disallow custom number handling we'd need to honor when writing. // AllowReadingFromString and Strict are fine since there's no action to take when writing. (Options.NumberHandling & (JsonNumberHandling.WriteAsString | JsonNumberHandling.AllowNamedFloatingPointLiterals)) == 0 && Options.ReferenceHandlingStrategy == ReferenceHandlingStrategy.None && #pragma warning disable SYSLIB0020 !Options.IgnoreNullValues && // This property is obsolete. #pragma warning restore SYSLIB0020 // Ensure options values are consistent with expected defaults. Options.DefaultIgnoreCondition == GeneratedSerializerOptions.DefaultIgnoreCondition && Options.IgnoreReadOnlyFields == GeneratedSerializerOptions.IgnoreReadOnlyFields && Options.IgnoreReadOnlyProperties == GeneratedSerializerOptions.IgnoreReadOnlyProperties && Options.IncludeFields == GeneratedSerializerOptions.IncludeFields && Options.PropertyNamingPolicy == GeneratedSerializerOptions.PropertyNamingPolicy && Options.DictionaryKeyPolicy == GeneratedSerializerOptions.DictionaryKeyPolicy && Options.WriteIndented == GeneratedSerializerOptions.WriteIndented; } } return _canUseSerializationLogic.Value; } } /// <summary> /// The default run time options for the context. Its values are defined at design-time via <see cref="JsonSourceGenerationOptionsAttribute"/>. /// </summary> protected abstract JsonSerializerOptions? GeneratedSerializerOptions { get; } /// <summary> /// Creates an instance of <see cref="JsonSerializerContext"/> and binds it with the indicated <see cref="JsonSerializerOptions"/>. /// </summary> /// <param name="options">The run time provided options for the context instance.</param> /// <remarks> /// If no instance options are passed, then no options are set until the context is bound using <see cref="JsonSerializerOptions.AddContext{TContext}"/>, /// or until <see cref="Options"/> is called, where a new options instance is created and bound. /// </remarks> protected JsonSerializerContext(JsonSerializerOptions? 
options) { if (options != null) { options.JsonSerializerContext = this; _options = options; } } /// <summary> /// Returns a <see cref="JsonTypeInfo"/> instance representing the given type. /// </summary> /// <param name="type">The type to fetch metadata about.</param> /// <returns>The metadata for the specified type, or <see langword="null" /> if the context has no metadata for the type.</returns> public abstract JsonTypeInfo? GetTypeInfo(Type type); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Text.Json.Serialization.Metadata; namespace System.Text.Json.Serialization { /// <summary> /// Provides metadata about a set of types that is relevant to JSON serialization. /// </summary> public abstract partial class JsonSerializerContext { private bool? _canUseSerializationLogic; internal JsonSerializerOptions? _options; /// <summary> /// Gets the run time specified options of the context. If no options were passed /// when instanciating the context, then a new instance is bound and returned. /// </summary> /// <remarks> /// The instance cannot be mutated once it is bound with the context instance. /// </remarks> public JsonSerializerOptions Options => _options ??= new JsonSerializerOptions { JsonSerializerContext = this }; /// <summary> /// Indicates whether pre-generated serialization logic for types in the context /// is compatible with the run time specified <see cref="JsonSerializerOptions"/>. /// </summary> internal bool CanUseSerializationLogic { get { if (!_canUseSerializationLogic.HasValue) { if (GeneratedSerializerOptions == null) { _canUseSerializationLogic = false; } else { _canUseSerializationLogic = // Guard against unsupported features Options.Converters.Count == 0 && Options.Encoder == null && // Disallow custom number handling we'd need to honor when writing. // AllowReadingFromString and Strict are fine since there's no action to take when writing. (Options.NumberHandling & (JsonNumberHandling.WriteAsString | JsonNumberHandling.AllowNamedFloatingPointLiterals)) == 0 && Options.ReferenceHandlingStrategy == ReferenceHandlingStrategy.None && #pragma warning disable SYSLIB0020 !Options.IgnoreNullValues && // This property is obsolete. #pragma warning restore SYSLIB0020 // Ensure options values are consistent with expected defaults. Options.DefaultIgnoreCondition == GeneratedSerializerOptions.DefaultIgnoreCondition && Options.IgnoreReadOnlyFields == GeneratedSerializerOptions.IgnoreReadOnlyFields && Options.IgnoreReadOnlyProperties == GeneratedSerializerOptions.IgnoreReadOnlyProperties && Options.IncludeFields == GeneratedSerializerOptions.IncludeFields && Options.PropertyNamingPolicy == GeneratedSerializerOptions.PropertyNamingPolicy && Options.DictionaryKeyPolicy == GeneratedSerializerOptions.DictionaryKeyPolicy && Options.WriteIndented == GeneratedSerializerOptions.WriteIndented; } } return _canUseSerializationLogic.Value; } } /// <summary> /// The default run time options for the context. Its values are defined at design-time via <see cref="JsonSourceGenerationOptionsAttribute"/>. /// </summary> protected abstract JsonSerializerOptions? GeneratedSerializerOptions { get; } /// <summary> /// Creates an instance of <see cref="JsonSerializerContext"/> and binds it with the indicated <see cref="JsonSerializerOptions"/>. /// </summary> /// <param name="options">The run time provided options for the context instance.</param> /// <remarks> /// If no instance options are passed, then no options are set until the context is bound using <see cref="JsonSerializerOptions.AddContext{TContext}"/>, /// or until <see cref="Options"/> is called, where a new options instance is created and bound. /// </remarks> protected JsonSerializerContext(JsonSerializerOptions? 
options) { if (options != null) { options.JsonSerializerContext = this; _options = options; } } /// <summary> /// Returns a <see cref="JsonTypeInfo"/> instance representing the given type. /// </summary> /// <param name="type">The type to fetch metadata about.</param> /// <returns>The metadata for the specified type, or <see langword="null" /> if the context has no metadata for the type.</returns> public abstract JsonTypeInfo? GetTypeInfo(Type type); } }
-1
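The JsonSerializerContext source above shows how a context lazily binds a JsonSerializerOptions instance and only enables its pre-generated fast path when the run-time options match the design-time ones. A minimal consumption sketch, assuming a hypothetical WeatherForecast record and an AppJsonContext whose body is produced by the System.Text.Json source generator:

using System;
using System.Text.Json;
using System.Text.Json.Serialization;

// Hypothetical POCO, used only for illustration.
public record WeatherForecast(DateTime Date, int TemperatureC, string Summary);

// The source generator emits the partial body, including a static Default context
// and one typed JsonTypeInfo property per [JsonSerializable] type.
[JsonSerializable(typeof(WeatherForecast))]
internal partial class AppJsonContext : JsonSerializerContext { }

public static class ContextDemo
{
    public static void Main()
    {
        var forecast = new WeatherForecast(DateTime.UtcNow, 21, "Mild");

        // Uses the design-time options baked into the generated context.
        string json = JsonSerializer.Serialize(forecast, AppJsonContext.Default.WeatherForecast);

        // Passing options to the generated constructor binds them to this instance;
        // per the remarks above, they cannot be mutated once bound.
        var indented = new AppJsonContext(new JsonSerializerOptions { WriteIndented = true });
        string pretty = JsonSerializer.Serialize(forecast, typeof(WeatherForecast), indented);

        Console.WriteLine(json);
        Console.WriteLine(pretty);
    }
}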
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/libraries/System.Private.CoreLib/src/System/Span.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Versioning; using EditorBrowsableAttribute = System.ComponentModel.EditorBrowsableAttribute; using EditorBrowsableState = System.ComponentModel.EditorBrowsableState; #pragma warning disable 0809 //warning CS0809: Obsolete member 'Span<T>.Equals(object)' overrides non-obsolete member 'object.Equals(object)' namespace System { /// <summary> /// Span represents a contiguous region of arbitrary memory. Unlike arrays, it can point to either managed /// or native memory, or to memory allocated on the stack. It is type- and memory-safe. /// </summary> [DebuggerTypeProxy(typeof(SpanDebugView<>))] [DebuggerDisplay("{ToString(),raw}")] [NonVersionable] public readonly ref struct Span<T> { /// <summary>A byref or a native ptr.</summary> internal readonly ByReference<T> _pointer; /// <summary>The number of elements this Span contains.</summary> private readonly int _length; /// <summary> /// Creates a new span over the entirety of the target array. /// </summary> /// <param name="array">The target array.</param> /// <remarks>Returns default when <paramref name="array"/> is null.</remarks> /// <exception cref="System.ArrayTypeMismatchException">Thrown when <paramref name="array"/> is covariant and array's type is not exactly T[].</exception> [MethodImpl(MethodImplOptions.AggressiveInlining)] public Span(T[]? array) { if (array == null) { this = default; return; // returns default } if (!typeof(T).IsValueType && array.GetType() != typeof(T[])) ThrowHelper.ThrowArrayTypeMismatchException(); _pointer = new ByReference<T>(ref MemoryMarshal.GetArrayDataReference(array)); _length = array.Length; } /// <summary> /// Creates a new span over the portion of the target array beginning /// at 'start' index and ending at 'end' index (exclusive). /// </summary> /// <param name="array">The target array.</param> /// <param name="start">The index at which to begin the span.</param> /// <param name="length">The number of items in the span.</param> /// <remarks>Returns default when <paramref name="array"/> is null.</remarks> /// <exception cref="System.ArrayTypeMismatchException">Thrown when <paramref name="array"/> is covariant and array's type is not exactly T[].</exception> /// <exception cref="System.ArgumentOutOfRangeException"> /// Thrown when the specified <paramref name="start"/> or end index is not in the range (&lt;0 or &gt;Length). /// </exception> [MethodImpl(MethodImplOptions.AggressiveInlining)] public Span(T[]? array, int start, int length) { if (array == null) { if (start != 0 || length != 0) ThrowHelper.ThrowArgumentOutOfRangeException(); this = default; return; // returns default } if (!typeof(T).IsValueType && array.GetType() != typeof(T[])) ThrowHelper.ThrowArrayTypeMismatchException(); #if TARGET_64BIT // See comment in Span<T>.Slice for how this works. if ((ulong)(uint)start + (ulong)(uint)length > (ulong)(uint)array.Length) ThrowHelper.ThrowArgumentOutOfRangeException(); #else if ((uint)start > (uint)array.Length || (uint)length > (uint)(array.Length - start)) ThrowHelper.ThrowArgumentOutOfRangeException(); #endif _pointer = new ByReference<T>(ref Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(array), (nint)(uint)start /* force zero-extension */)); _length = length; } /// <summary> /// Creates a new span over the target unmanaged buffer. 
Clearly this /// is quite dangerous, because we are creating arbitrarily typed T's /// out of a void*-typed block of memory. And the length is not checked. /// But if this creation is correct, then all subsequent uses are correct. /// </summary> /// <param name="pointer">An unmanaged pointer to memory.</param> /// <param name="length">The number of <typeparamref name="T"/> elements the memory contains.</param> /// <exception cref="System.ArgumentException"> /// Thrown when <typeparamref name="T"/> is reference type or contains pointers and hence cannot be stored in unmanaged memory. /// </exception> /// <exception cref="System.ArgumentOutOfRangeException"> /// Thrown when the specified <paramref name="length"/> is negative. /// </exception> [CLSCompliant(false)] [MethodImpl(MethodImplOptions.AggressiveInlining)] public unsafe Span(void* pointer, int length) { if (RuntimeHelpers.IsReferenceOrContainsReferences<T>()) ThrowHelper.ThrowInvalidTypeWithPointersNotSupported(typeof(T)); if (length < 0) ThrowHelper.ThrowArgumentOutOfRangeException(); _pointer = new ByReference<T>(ref Unsafe.As<byte, T>(ref *(byte*)pointer)); _length = length; } // Constructor for internal use only. [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Span(ref T ptr, int length) { Debug.Assert(length >= 0); _pointer = new ByReference<T>(ref ptr); _length = length; } /// <summary> /// Returns a reference to specified element of the Span. /// </summary> /// <param name="index"></param> /// <returns></returns> /// <exception cref="System.IndexOutOfRangeException"> /// Thrown when index less than 0 or index greater than or equal to Length /// </exception> public ref T this[int index] { [Intrinsic] [MethodImpl(MethodImplOptions.AggressiveInlining)] [NonVersionable] get { if ((uint)index >= (uint)_length) ThrowHelper.ThrowIndexOutOfRangeException(); return ref Unsafe.Add(ref _pointer.Value, (nint)(uint)index /* force zero-extension */); } } /// <summary> /// The number of items in the span. /// </summary> public int Length { [NonVersionable] get => _length; } /// <summary> /// Returns true if Length is 0. /// </summary> public bool IsEmpty { [NonVersionable] get => 0 >= (uint)_length; // Workaround for https://github.com/dotnet/runtime/issues/10950 } /// <summary> /// Returns false if left and right point at the same memory and have the same length. Note that /// this does *not* check to see if the *contents* are equal. /// </summary> public static bool operator !=(Span<T> left, Span<T> right) => !(left == right); /// <summary> /// This method is not supported as spans cannot be boxed. To compare two spans, use operator==. /// <exception cref="System.NotSupportedException"> /// Always thrown by this method. /// </exception> /// </summary> [Obsolete("Equals() on Span will always throw an exception. Use the equality operator instead.")] [EditorBrowsable(EditorBrowsableState.Never)] public override bool Equals(object? obj) => throw new NotSupportedException(SR.NotSupported_CannotCallEqualsOnSpan); /// <summary> /// This method is not supported as spans cannot be boxed. /// <exception cref="System.NotSupportedException"> /// Always thrown by this method. 
/// </exception> /// </summary> [Obsolete("GetHashCode() on Span will always throw an exception.")] [EditorBrowsable(EditorBrowsableState.Never)] public override int GetHashCode() => throw new NotSupportedException(SR.NotSupported_CannotCallGetHashCodeOnSpan); /// <summary> /// Defines an implicit conversion of an array to a <see cref="Span{T}"/> /// </summary> public static implicit operator Span<T>(T[]? array) => new Span<T>(array); /// <summary> /// Defines an implicit conversion of a <see cref="ArraySegment{T}"/> to a <see cref="Span{T}"/> /// </summary> public static implicit operator Span<T>(ArraySegment<T> segment) => new Span<T>(segment.Array, segment.Offset, segment.Count); /// <summary> /// Returns an empty <see cref="Span{T}"/> /// </summary> public static Span<T> Empty => default; /// <summary>Gets an enumerator for this span.</summary> public Enumerator GetEnumerator() => new Enumerator(this); /// <summary>Enumerates the elements of a <see cref="Span{T}"/>.</summary> public ref struct Enumerator { /// <summary>The span being enumerated.</summary> private readonly Span<T> _span; /// <summary>The next index to yield.</summary> private int _index; /// <summary>Initialize the enumerator.</summary> /// <param name="span">The span to enumerate.</param> [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Enumerator(Span<T> span) { _span = span; _index = -1; } /// <summary>Advances the enumerator to the next element of the span.</summary> [MethodImpl(MethodImplOptions.AggressiveInlining)] public bool MoveNext() { int index = _index + 1; if (index < _span.Length) { _index = index; return true; } return false; } /// <summary>Gets the element at the current position of the enumerator.</summary> public ref T Current { [MethodImpl(MethodImplOptions.AggressiveInlining)] get => ref _span[_index]; } } /// <summary> /// Returns a reference to the 0th element of the Span. If the Span is empty, returns null reference. /// It can be used for pinning and is required to support the use of span within a fixed statement. /// </summary> [EditorBrowsable(EditorBrowsableState.Never)] public ref T GetPinnableReference() { // Ensure that the native code has just one forward branch that is predicted-not-taken. ref T ret = ref Unsafe.NullRef<T>(); if (_length != 0) ret = ref _pointer.Value; return ref ret; } /// <summary> /// Clears the contents of this span. /// </summary> [MethodImpl(MethodImplOptions.AggressiveInlining)] public unsafe void Clear() { if (RuntimeHelpers.IsReferenceOrContainsReferences<T>()) { SpanHelpers.ClearWithReferences(ref Unsafe.As<T, IntPtr>(ref _pointer.Value), (uint)_length * (nuint)(Unsafe.SizeOf<T>() / sizeof(nuint))); } else { SpanHelpers.ClearWithoutReferences(ref Unsafe.As<T, byte>(ref _pointer.Value), (uint)_length * (nuint)Unsafe.SizeOf<T>()); } } /// <summary> /// Fills the contents of this span with the given value. /// </summary> [MethodImpl(MethodImplOptions.AggressiveInlining)] public void Fill(T value) { if (Unsafe.SizeOf<T>() == 1) { // Special-case single-byte types like byte / sbyte / bool. // The runtime eventually calls memset, which can efficiently support large buffers. // We don't need to check IsReferenceOrContainsReferences because no references // can ever be stored in types this small. Unsafe.InitBlockUnaligned(ref Unsafe.As<T, byte>(ref _pointer.Value), Unsafe.As<T, byte>(ref value), (uint)_length); } else { // Call our optimized workhorse method for all other types. 
SpanHelpers.Fill(ref _pointer.Value, (uint)_length, value); } } /// <summary> /// Copies the contents of this span into destination span. If the source /// and destinations overlap, this method behaves as if the original values in /// a temporary location before the destination is overwritten. /// </summary> /// <param name="destination">The span to copy items into.</param> /// <exception cref="System.ArgumentException"> /// Thrown when the destination Span is shorter than the source Span. /// </exception> [MethodImpl(MethodImplOptions.AggressiveInlining)] public void CopyTo(Span<T> destination) { // Using "if (!TryCopyTo(...))" results in two branches: one for the length // check, and one for the result of TryCopyTo. Since these checks are equivalent, // we can optimize by performing the check once ourselves then calling Memmove directly. if ((uint)_length <= (uint)destination.Length) { Buffer.Memmove(ref destination._pointer.Value, ref _pointer.Value, (uint)_length); } else { ThrowHelper.ThrowArgumentException_DestinationTooShort(); } } /// <summary> /// Copies the contents of this span into destination span. If the source /// and destinations overlap, this method behaves as if the original values in /// a temporary location before the destination is overwritten. /// </summary> /// <param name="destination">The span to copy items into.</param> /// <returns>If the destination span is shorter than the source span, this method /// return false and no data is written to the destination.</returns> public bool TryCopyTo(Span<T> destination) { bool retVal = false; if ((uint)_length <= (uint)destination.Length) { Buffer.Memmove(ref destination._pointer.Value, ref _pointer.Value, (uint)_length); retVal = true; } return retVal; } /// <summary> /// Returns true if left and right point at the same memory and have the same length. Note that /// this does *not* check to see if the *contents* are equal. /// </summary> public static bool operator ==(Span<T> left, Span<T> right) => left._length == right._length && Unsafe.AreSame<T>(ref left._pointer.Value, ref right._pointer.Value); /// <summary> /// Defines an implicit conversion of a <see cref="Span{T}"/> to a <see cref="ReadOnlySpan{T}"/> /// </summary> public static implicit operator ReadOnlySpan<T>(Span<T> span) => new ReadOnlySpan<T>(ref span._pointer.Value, span._length); /// <summary> /// For <see cref="Span{Char}"/>, returns a new instance of string that represents the characters pointed to by the span. /// Otherwise, returns a <see cref="string"/> with the name of the type and the number of elements. /// </summary> public override string ToString() { if (typeof(T) == typeof(char)) { return new string(new ReadOnlySpan<char>(ref Unsafe.As<T, char>(ref _pointer.Value), _length)); } return $"System.Span<{typeof(T).Name}>[{_length}]"; } /// <summary> /// Forms a slice out of the given span, beginning at 'start'. /// </summary> /// <param name="start">The index at which to begin this slice.</param> /// <exception cref="System.ArgumentOutOfRangeException"> /// Thrown when the specified <paramref name="start"/> index is not in range (&lt;0 or &gt;Length). 
/// </exception> [MethodImpl(MethodImplOptions.AggressiveInlining)] public Span<T> Slice(int start) { if ((uint)start > (uint)_length) ThrowHelper.ThrowArgumentOutOfRangeException(); return new Span<T>(ref Unsafe.Add(ref _pointer.Value, (nint)(uint)start /* force zero-extension */), _length - start); } /// <summary> /// Forms a slice out of the given span, beginning at 'start', of given length /// </summary> /// <param name="start">The index at which to begin this slice.</param> /// <param name="length">The desired length for the slice (exclusive).</param> /// <exception cref="System.ArgumentOutOfRangeException"> /// Thrown when the specified <paramref name="start"/> or end index is not in range (&lt;0 or &gt;Length). /// </exception> [MethodImpl(MethodImplOptions.AggressiveInlining)] public Span<T> Slice(int start, int length) { #if TARGET_64BIT // Since start and length are both 32-bit, their sum can be computed across a 64-bit domain // without loss of fidelity. The cast to uint before the cast to ulong ensures that the // extension from 32- to 64-bit is zero-extending rather than sign-extending. The end result // of this is that if either input is negative or if the input sum overflows past Int32.MaxValue, // that information is captured correctly in the comparison against the backing _length field. // We don't use this same mechanism in a 32-bit process due to the overhead of 64-bit arithmetic. if ((ulong)(uint)start + (ulong)(uint)length > (ulong)(uint)_length) ThrowHelper.ThrowArgumentOutOfRangeException(); #else if ((uint)start > (uint)_length || (uint)length > (uint)(_length - start)) ThrowHelper.ThrowArgumentOutOfRangeException(); #endif return new Span<T>(ref Unsafe.Add(ref _pointer.Value, (nint)(uint)start /* force zero-extension */), length); } /// <summary> /// Copies the contents of this span into a new array. This heap /// allocates, so should generally be avoided, however it is sometimes /// necessary to bridge the gap with APIs written in terms of arrays. /// </summary> [MethodImpl(MethodImplOptions.AggressiveInlining)] public T[] ToArray() { if (_length == 0) return Array.Empty<T>(); var destination = new T[_length]; Buffer.Memmove(ref MemoryMarshal.GetArrayDataReference(destination), ref _pointer.Value, (uint)_length); return destination; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Versioning; using EditorBrowsableAttribute = System.ComponentModel.EditorBrowsableAttribute; using EditorBrowsableState = System.ComponentModel.EditorBrowsableState; #pragma warning disable 0809 //warning CS0809: Obsolete member 'Span<T>.Equals(object)' overrides non-obsolete member 'object.Equals(object)' namespace System { /// <summary> /// Span represents a contiguous region of arbitrary memory. Unlike arrays, it can point to either managed /// or native memory, or to memory allocated on the stack. It is type- and memory-safe. /// </summary> [DebuggerTypeProxy(typeof(SpanDebugView<>))] [DebuggerDisplay("{ToString(),raw}")] [NonVersionable] public readonly ref struct Span<T> { /// <summary>A byref or a native ptr.</summary> internal readonly ByReference<T> _pointer; /// <summary>The number of elements this Span contains.</summary> private readonly int _length; /// <summary> /// Creates a new span over the entirety of the target array. /// </summary> /// <param name="array">The target array.</param> /// <remarks>Returns default when <paramref name="array"/> is null.</remarks> /// <exception cref="System.ArrayTypeMismatchException">Thrown when <paramref name="array"/> is covariant and array's type is not exactly T[].</exception> [MethodImpl(MethodImplOptions.AggressiveInlining)] public Span(T[]? array) { if (array == null) { this = default; return; // returns default } if (!typeof(T).IsValueType && array.GetType() != typeof(T[])) ThrowHelper.ThrowArrayTypeMismatchException(); _pointer = new ByReference<T>(ref MemoryMarshal.GetArrayDataReference(array)); _length = array.Length; } /// <summary> /// Creates a new span over the portion of the target array beginning /// at 'start' index and ending at 'end' index (exclusive). /// </summary> /// <param name="array">The target array.</param> /// <param name="start">The index at which to begin the span.</param> /// <param name="length">The number of items in the span.</param> /// <remarks>Returns default when <paramref name="array"/> is null.</remarks> /// <exception cref="System.ArrayTypeMismatchException">Thrown when <paramref name="array"/> is covariant and array's type is not exactly T[].</exception> /// <exception cref="System.ArgumentOutOfRangeException"> /// Thrown when the specified <paramref name="start"/> or end index is not in the range (&lt;0 or &gt;Length). /// </exception> [MethodImpl(MethodImplOptions.AggressiveInlining)] public Span(T[]? array, int start, int length) { if (array == null) { if (start != 0 || length != 0) ThrowHelper.ThrowArgumentOutOfRangeException(); this = default; return; // returns default } if (!typeof(T).IsValueType && array.GetType() != typeof(T[])) ThrowHelper.ThrowArrayTypeMismatchException(); #if TARGET_64BIT // See comment in Span<T>.Slice for how this works. if ((ulong)(uint)start + (ulong)(uint)length > (ulong)(uint)array.Length) ThrowHelper.ThrowArgumentOutOfRangeException(); #else if ((uint)start > (uint)array.Length || (uint)length > (uint)(array.Length - start)) ThrowHelper.ThrowArgumentOutOfRangeException(); #endif _pointer = new ByReference<T>(ref Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(array), (nint)(uint)start /* force zero-extension */)); _length = length; } /// <summary> /// Creates a new span over the target unmanaged buffer. 
Clearly this /// is quite dangerous, because we are creating arbitrarily typed T's /// out of a void*-typed block of memory. And the length is not checked. /// But if this creation is correct, then all subsequent uses are correct. /// </summary> /// <param name="pointer">An unmanaged pointer to memory.</param> /// <param name="length">The number of <typeparamref name="T"/> elements the memory contains.</param> /// <exception cref="System.ArgumentException"> /// Thrown when <typeparamref name="T"/> is reference type or contains pointers and hence cannot be stored in unmanaged memory. /// </exception> /// <exception cref="System.ArgumentOutOfRangeException"> /// Thrown when the specified <paramref name="length"/> is negative. /// </exception> [CLSCompliant(false)] [MethodImpl(MethodImplOptions.AggressiveInlining)] public unsafe Span(void* pointer, int length) { if (RuntimeHelpers.IsReferenceOrContainsReferences<T>()) ThrowHelper.ThrowInvalidTypeWithPointersNotSupported(typeof(T)); if (length < 0) ThrowHelper.ThrowArgumentOutOfRangeException(); _pointer = new ByReference<T>(ref Unsafe.As<byte, T>(ref *(byte*)pointer)); _length = length; } // Constructor for internal use only. [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Span(ref T ptr, int length) { Debug.Assert(length >= 0); _pointer = new ByReference<T>(ref ptr); _length = length; } /// <summary> /// Returns a reference to specified element of the Span. /// </summary> /// <param name="index"></param> /// <returns></returns> /// <exception cref="System.IndexOutOfRangeException"> /// Thrown when index less than 0 or index greater than or equal to Length /// </exception> public ref T this[int index] { [Intrinsic] [MethodImpl(MethodImplOptions.AggressiveInlining)] [NonVersionable] get { if ((uint)index >= (uint)_length) ThrowHelper.ThrowIndexOutOfRangeException(); return ref Unsafe.Add(ref _pointer.Value, (nint)(uint)index /* force zero-extension */); } } /// <summary> /// The number of items in the span. /// </summary> public int Length { [NonVersionable] get => _length; } /// <summary> /// Returns true if Length is 0. /// </summary> public bool IsEmpty { [NonVersionable] get => 0 >= (uint)_length; // Workaround for https://github.com/dotnet/runtime/issues/10950 } /// <summary> /// Returns false if left and right point at the same memory and have the same length. Note that /// this does *not* check to see if the *contents* are equal. /// </summary> public static bool operator !=(Span<T> left, Span<T> right) => !(left == right); /// <summary> /// This method is not supported as spans cannot be boxed. To compare two spans, use operator==. /// <exception cref="System.NotSupportedException"> /// Always thrown by this method. /// </exception> /// </summary> [Obsolete("Equals() on Span will always throw an exception. Use the equality operator instead.")] [EditorBrowsable(EditorBrowsableState.Never)] public override bool Equals(object? obj) => throw new NotSupportedException(SR.NotSupported_CannotCallEqualsOnSpan); /// <summary> /// This method is not supported as spans cannot be boxed. /// <exception cref="System.NotSupportedException"> /// Always thrown by this method. 
/// </exception> /// </summary> [Obsolete("GetHashCode() on Span will always throw an exception.")] [EditorBrowsable(EditorBrowsableState.Never)] public override int GetHashCode() => throw new NotSupportedException(SR.NotSupported_CannotCallGetHashCodeOnSpan); /// <summary> /// Defines an implicit conversion of an array to a <see cref="Span{T}"/> /// </summary> public static implicit operator Span<T>(T[]? array) => new Span<T>(array); /// <summary> /// Defines an implicit conversion of a <see cref="ArraySegment{T}"/> to a <see cref="Span{T}"/> /// </summary> public static implicit operator Span<T>(ArraySegment<T> segment) => new Span<T>(segment.Array, segment.Offset, segment.Count); /// <summary> /// Returns an empty <see cref="Span{T}"/> /// </summary> public static Span<T> Empty => default; /// <summary>Gets an enumerator for this span.</summary> public Enumerator GetEnumerator() => new Enumerator(this); /// <summary>Enumerates the elements of a <see cref="Span{T}"/>.</summary> public ref struct Enumerator { /// <summary>The span being enumerated.</summary> private readonly Span<T> _span; /// <summary>The next index to yield.</summary> private int _index; /// <summary>Initialize the enumerator.</summary> /// <param name="span">The span to enumerate.</param> [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Enumerator(Span<T> span) { _span = span; _index = -1; } /// <summary>Advances the enumerator to the next element of the span.</summary> [MethodImpl(MethodImplOptions.AggressiveInlining)] public bool MoveNext() { int index = _index + 1; if (index < _span.Length) { _index = index; return true; } return false; } /// <summary>Gets the element at the current position of the enumerator.</summary> public ref T Current { [MethodImpl(MethodImplOptions.AggressiveInlining)] get => ref _span[_index]; } } /// <summary> /// Returns a reference to the 0th element of the Span. If the Span is empty, returns null reference. /// It can be used for pinning and is required to support the use of span within a fixed statement. /// </summary> [EditorBrowsable(EditorBrowsableState.Never)] public ref T GetPinnableReference() { // Ensure that the native code has just one forward branch that is predicted-not-taken. ref T ret = ref Unsafe.NullRef<T>(); if (_length != 0) ret = ref _pointer.Value; return ref ret; } /// <summary> /// Clears the contents of this span. /// </summary> [MethodImpl(MethodImplOptions.AggressiveInlining)] public unsafe void Clear() { if (RuntimeHelpers.IsReferenceOrContainsReferences<T>()) { SpanHelpers.ClearWithReferences(ref Unsafe.As<T, IntPtr>(ref _pointer.Value), (uint)_length * (nuint)(Unsafe.SizeOf<T>() / sizeof(nuint))); } else { SpanHelpers.ClearWithoutReferences(ref Unsafe.As<T, byte>(ref _pointer.Value), (uint)_length * (nuint)Unsafe.SizeOf<T>()); } } /// <summary> /// Fills the contents of this span with the given value. /// </summary> [MethodImpl(MethodImplOptions.AggressiveInlining)] public void Fill(T value) { if (Unsafe.SizeOf<T>() == 1) { // Special-case single-byte types like byte / sbyte / bool. // The runtime eventually calls memset, which can efficiently support large buffers. // We don't need to check IsReferenceOrContainsReferences because no references // can ever be stored in types this small. Unsafe.InitBlockUnaligned(ref Unsafe.As<T, byte>(ref _pointer.Value), Unsafe.As<T, byte>(ref value), (uint)_length); } else { // Call our optimized workhorse method for all other types. 
SpanHelpers.Fill(ref _pointer.Value, (uint)_length, value); } } /// <summary> /// Copies the contents of this span into destination span. If the source /// and destinations overlap, this method behaves as if the original values in /// a temporary location before the destination is overwritten. /// </summary> /// <param name="destination">The span to copy items into.</param> /// <exception cref="System.ArgumentException"> /// Thrown when the destination Span is shorter than the source Span. /// </exception> [MethodImpl(MethodImplOptions.AggressiveInlining)] public void CopyTo(Span<T> destination) { // Using "if (!TryCopyTo(...))" results in two branches: one for the length // check, and one for the result of TryCopyTo. Since these checks are equivalent, // we can optimize by performing the check once ourselves then calling Memmove directly. if ((uint)_length <= (uint)destination.Length) { Buffer.Memmove(ref destination._pointer.Value, ref _pointer.Value, (uint)_length); } else { ThrowHelper.ThrowArgumentException_DestinationTooShort(); } } /// <summary> /// Copies the contents of this span into destination span. If the source /// and destinations overlap, this method behaves as if the original values in /// a temporary location before the destination is overwritten. /// </summary> /// <param name="destination">The span to copy items into.</param> /// <returns>If the destination span is shorter than the source span, this method /// return false and no data is written to the destination.</returns> public bool TryCopyTo(Span<T> destination) { bool retVal = false; if ((uint)_length <= (uint)destination.Length) { Buffer.Memmove(ref destination._pointer.Value, ref _pointer.Value, (uint)_length); retVal = true; } return retVal; } /// <summary> /// Returns true if left and right point at the same memory and have the same length. Note that /// this does *not* check to see if the *contents* are equal. /// </summary> public static bool operator ==(Span<T> left, Span<T> right) => left._length == right._length && Unsafe.AreSame<T>(ref left._pointer.Value, ref right._pointer.Value); /// <summary> /// Defines an implicit conversion of a <see cref="Span{T}"/> to a <see cref="ReadOnlySpan{T}"/> /// </summary> public static implicit operator ReadOnlySpan<T>(Span<T> span) => new ReadOnlySpan<T>(ref span._pointer.Value, span._length); /// <summary> /// For <see cref="Span{Char}"/>, returns a new instance of string that represents the characters pointed to by the span. /// Otherwise, returns a <see cref="string"/> with the name of the type and the number of elements. /// </summary> public override string ToString() { if (typeof(T) == typeof(char)) { return new string(new ReadOnlySpan<char>(ref Unsafe.As<T, char>(ref _pointer.Value), _length)); } return $"System.Span<{typeof(T).Name}>[{_length}]"; } /// <summary> /// Forms a slice out of the given span, beginning at 'start'. /// </summary> /// <param name="start">The index at which to begin this slice.</param> /// <exception cref="System.ArgumentOutOfRangeException"> /// Thrown when the specified <paramref name="start"/> index is not in range (&lt;0 or &gt;Length). 
/// </exception> [MethodImpl(MethodImplOptions.AggressiveInlining)] public Span<T> Slice(int start) { if ((uint)start > (uint)_length) ThrowHelper.ThrowArgumentOutOfRangeException(); return new Span<T>(ref Unsafe.Add(ref _pointer.Value, (nint)(uint)start /* force zero-extension */), _length - start); } /// <summary> /// Forms a slice out of the given span, beginning at 'start', of given length /// </summary> /// <param name="start">The index at which to begin this slice.</param> /// <param name="length">The desired length for the slice (exclusive).</param> /// <exception cref="System.ArgumentOutOfRangeException"> /// Thrown when the specified <paramref name="start"/> or end index is not in range (&lt;0 or &gt;Length). /// </exception> [MethodImpl(MethodImplOptions.AggressiveInlining)] public Span<T> Slice(int start, int length) { #if TARGET_64BIT // Since start and length are both 32-bit, their sum can be computed across a 64-bit domain // without loss of fidelity. The cast to uint before the cast to ulong ensures that the // extension from 32- to 64-bit is zero-extending rather than sign-extending. The end result // of this is that if either input is negative or if the input sum overflows past Int32.MaxValue, // that information is captured correctly in the comparison against the backing _length field. // We don't use this same mechanism in a 32-bit process due to the overhead of 64-bit arithmetic. if ((ulong)(uint)start + (ulong)(uint)length > (ulong)(uint)_length) ThrowHelper.ThrowArgumentOutOfRangeException(); #else if ((uint)start > (uint)_length || (uint)length > (uint)(_length - start)) ThrowHelper.ThrowArgumentOutOfRangeException(); #endif return new Span<T>(ref Unsafe.Add(ref _pointer.Value, (nint)(uint)start /* force zero-extension */), length); } /// <summary> /// Copies the contents of this span into a new array. This heap /// allocates, so should generally be avoided, however it is sometimes /// necessary to bridge the gap with APIs written in terms of arrays. /// </summary> [MethodImpl(MethodImplOptions.AggressiveInlining)] public T[] ToArray() { if (_length == 0) return Array.Empty<T>(); var destination = new T[_length]; Buffer.Memmove(ref MemoryMarshal.GetArrayDataReference(destination), ref _pointer.Value, (uint)_length); return destination; } } }
-1
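The Span&lt;T&gt;.Slice comment above explains how the 64-bit build folds the start/length validation into one unsigned comparison: casting each operand through uint zero-extends it, so negative inputs become huge values and the sum cannot wrap. A standalone sketch of that guard (the method name is ours, not part of the BCL):

using System;

public static class SliceGuardDemo
{
    // Mirrors the TARGET_64BIT check in Span<T>.Slice: one comparison rejects
    // a negative start, a negative length, and start + length overflowing past Length.
    private static bool IsValidSlice(int start, int length, int available)
        => (ulong)(uint)start + (ulong)(uint)length <= (ulong)(uint)available;

    public static void Main()
    {
        Console.WriteLine(IsValidSlice(2, 3, 10));            // True
        Console.WriteLine(IsValidSlice(0, 10, 10));           // True  (full span)
        Console.WriteLine(IsValidSlice(-1, 3, 10));           // False (negative start zero-extends to ~4 billion)
        Console.WriteLine(IsValidSlice(2, int.MaxValue, 10)); // False (sum exceeds the available length)
    }
}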
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/mono/mono/sgen/sgen-simple-nursery.c
/** * \file * Simple always promote nursery. * * Copyright 2001-2003 Ximian, Inc * Copyright 2003-2010 Novell, Inc. * Copyright 2011 Xamarin Inc (http://www.xamarin.com) * Copyright (C) 2012 Xamarin Inc * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include "config.h" #ifdef HAVE_SGEN_GC #include <string.h> #include "mono/sgen/sgen-gc.h" #include "mono/sgen/sgen-protocol.h" #include "mono/sgen/sgen-layout-stats.h" #include "mono/sgen/sgen-client.h" #include "mono/sgen/sgen-workers.h" #include "mono/utils/mono-memory-model.h" #include "mono/utils/mono-proclib.h" static GCObject* alloc_for_promotion (GCVTable vtable, GCObject *obj, size_t objsize, gboolean has_references) { sgen_total_promoted_size += objsize; return sgen_major_collector.alloc_object (vtable, objsize, has_references); } static GCObject* alloc_for_promotion_par (GCVTable vtable, GCObject *obj, size_t objsize, gboolean has_references) { /* * FIXME * Note that the stat is not precise. sgen_total_promoted_size incrementing is not atomic and * even in that case, the same object might be promoted simultaneously by different workers * leading to one of the allocated major object to be discarded. */ sgen_total_promoted_size += objsize; return sgen_major_collector.alloc_object_par (vtable, objsize, has_references); } static SgenFragment* build_fragments_get_exclude_head (void) { return NULL; } static void build_fragments_release_exclude_head (void) { } static void build_fragments_finish (SgenFragmentAllocator *allocator) { } static void prepare_to_space (char *to_space_bitmap, size_t space_bitmap_size) { } static void clear_fragments (void) { } static void init_nursery (SgenFragmentAllocator *allocator, char *start, char *end) { char *nursery_limit = sgen_nursery_start + sgen_nursery_size; if (start < nursery_limit && end > nursery_limit) { sgen_fragment_allocator_add (allocator, start, nursery_limit); sgen_fragment_allocator_add (allocator, nursery_limit, end); } else { sgen_fragment_allocator_add (allocator, start, end); } } /******************************************Copy/Scan functins ************************************************/ #define collector_pin_object(obj, queue) sgen_pin_object (obj, queue); #define COLLECTOR_SERIAL_ALLOC_FOR_PROMOTION alloc_for_promotion #define COLLECTOR_PARALLEL_ALLOC_FOR_PROMOTION alloc_for_promotion_par #define COPY_OR_MARK_PARALLEL #include "sgen-copy-object.h" #define SGEN_SIMPLE_NURSERY #include "sgen-minor-copy-object.h" #include "sgen-minor-scan-object.h" static void fill_serial_ops (SgenObjectOperations *ops) { ops->copy_or_mark_object = SERIAL_COPY_OBJECT; FILL_MINOR_COLLECTOR_SCAN_OBJECT (ops); } #ifndef DISABLE_SGEN_MAJOR_MARKSWEEP_CONC #define SGEN_SIMPLE_PAR_NURSERY #include "sgen-minor-copy-object.h" #include "sgen-minor-scan-object.h" static void fill_parallel_ops (SgenObjectOperations *ops) { ops->copy_or_mark_object = SERIAL_COPY_OBJECT; FILL_MINOR_COLLECTOR_SCAN_OBJECT (ops); } #undef SGEN_SIMPLE_PAR_NURSERY #define SGEN_CONCURRENT_MAJOR #include "sgen-minor-copy-object.h" #include "sgen-minor-scan-object.h" static void fill_serial_with_concurrent_major_ops (SgenObjectOperations *ops) { ops->copy_or_mark_object = SERIAL_COPY_OBJECT; FILL_MINOR_COLLECTOR_SCAN_OBJECT (ops); } #define SGEN_SIMPLE_PAR_NURSERY #include "sgen-minor-copy-object.h" #include "sgen-minor-scan-object.h" static void fill_parallel_with_concurrent_major_ops (SgenObjectOperations *ops) { ops->copy_or_mark_object = SERIAL_COPY_OBJECT; 
FILL_MINOR_COLLECTOR_SCAN_OBJECT (ops); } #endif void sgen_simple_nursery_init (SgenMinorCollector *collector, gboolean parallel) { if (mono_cpu_count () <= 1) parallel = FALSE; #ifdef DISABLE_SGEN_MAJOR_MARKSWEEP_CONC g_assert (parallel == FALSE); #endif collector->is_split = FALSE; collector->is_parallel = parallel; collector->alloc_for_promotion = alloc_for_promotion; collector->alloc_for_promotion_par = alloc_for_promotion_par; collector->prepare_to_space = prepare_to_space; collector->clear_fragments = clear_fragments; collector->build_fragments_get_exclude_head = build_fragments_get_exclude_head; collector->build_fragments_release_exclude_head = build_fragments_release_exclude_head; collector->build_fragments_finish = build_fragments_finish; collector->init_nursery = init_nursery; fill_serial_ops (&collector->serial_ops); #ifndef DISABLE_SGEN_MAJOR_MARKSWEEP_CONC fill_serial_with_concurrent_major_ops (&collector->serial_ops_with_concurrent_major); fill_parallel_ops (&collector->parallel_ops); fill_parallel_with_concurrent_major_ops (&collector->parallel_ops_with_concurrent_major); /* * The nursery worker context is created first so it will have priority over * concurrent mark and concurrent sweep. */ if (parallel) sgen_workers_create_context (GENERATION_NURSERY, mono_cpu_count ()); #endif } #endif
/** * \file * Simple always promote nursery. * * Copyright 2001-2003 Ximian, Inc * Copyright 2003-2010 Novell, Inc. * Copyright 2011 Xamarin Inc (http://www.xamarin.com) * Copyright (C) 2012 Xamarin Inc * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include "config.h" #ifdef HAVE_SGEN_GC #include <string.h> #include "mono/sgen/sgen-gc.h" #include "mono/sgen/sgen-protocol.h" #include "mono/sgen/sgen-layout-stats.h" #include "mono/sgen/sgen-client.h" #include "mono/sgen/sgen-workers.h" #include "mono/utils/mono-memory-model.h" #include "mono/utils/mono-proclib.h" static GCObject* alloc_for_promotion (GCVTable vtable, GCObject *obj, size_t objsize, gboolean has_references) { sgen_total_promoted_size += objsize; return sgen_major_collector.alloc_object (vtable, objsize, has_references); } static GCObject* alloc_for_promotion_par (GCVTable vtable, GCObject *obj, size_t objsize, gboolean has_references) { /* * FIXME * Note that the stat is not precise. sgen_total_promoted_size incrementing is not atomic and * even in that case, the same object might be promoted simultaneously by different workers * leading to one of the allocated major object to be discarded. */ sgen_total_promoted_size += objsize; return sgen_major_collector.alloc_object_par (vtable, objsize, has_references); } static SgenFragment* build_fragments_get_exclude_head (void) { return NULL; } static void build_fragments_release_exclude_head (void) { } static void build_fragments_finish (SgenFragmentAllocator *allocator) { } static void prepare_to_space (char *to_space_bitmap, size_t space_bitmap_size) { } static void clear_fragments (void) { } static void init_nursery (SgenFragmentAllocator *allocator, char *start, char *end) { char *nursery_limit = sgen_nursery_start + sgen_nursery_size; if (start < nursery_limit && end > nursery_limit) { sgen_fragment_allocator_add (allocator, start, nursery_limit); sgen_fragment_allocator_add (allocator, nursery_limit, end); } else { sgen_fragment_allocator_add (allocator, start, end); } } /******************************************Copy/Scan functins ************************************************/ #define collector_pin_object(obj, queue) sgen_pin_object (obj, queue); #define COLLECTOR_SERIAL_ALLOC_FOR_PROMOTION alloc_for_promotion #define COLLECTOR_PARALLEL_ALLOC_FOR_PROMOTION alloc_for_promotion_par #define COPY_OR_MARK_PARALLEL #include "sgen-copy-object.h" #define SGEN_SIMPLE_NURSERY #include "sgen-minor-copy-object.h" #include "sgen-minor-scan-object.h" static void fill_serial_ops (SgenObjectOperations *ops) { ops->copy_or_mark_object = SERIAL_COPY_OBJECT; FILL_MINOR_COLLECTOR_SCAN_OBJECT (ops); } #ifndef DISABLE_SGEN_MAJOR_MARKSWEEP_CONC #define SGEN_SIMPLE_PAR_NURSERY #include "sgen-minor-copy-object.h" #include "sgen-minor-scan-object.h" static void fill_parallel_ops (SgenObjectOperations *ops) { ops->copy_or_mark_object = SERIAL_COPY_OBJECT; FILL_MINOR_COLLECTOR_SCAN_OBJECT (ops); } #undef SGEN_SIMPLE_PAR_NURSERY #define SGEN_CONCURRENT_MAJOR #include "sgen-minor-copy-object.h" #include "sgen-minor-scan-object.h" static void fill_serial_with_concurrent_major_ops (SgenObjectOperations *ops) { ops->copy_or_mark_object = SERIAL_COPY_OBJECT; FILL_MINOR_COLLECTOR_SCAN_OBJECT (ops); } #define SGEN_SIMPLE_PAR_NURSERY #include "sgen-minor-copy-object.h" #include "sgen-minor-scan-object.h" static void fill_parallel_with_concurrent_major_ops (SgenObjectOperations *ops) { ops->copy_or_mark_object = SERIAL_COPY_OBJECT; 
FILL_MINOR_COLLECTOR_SCAN_OBJECT (ops); } #endif void sgen_simple_nursery_init (SgenMinorCollector *collector, gboolean parallel) { if (mono_cpu_count () <= 1) parallel = FALSE; #ifdef DISABLE_SGEN_MAJOR_MARKSWEEP_CONC g_assert (parallel == FALSE); #endif collector->is_split = FALSE; collector->is_parallel = parallel; collector->alloc_for_promotion = alloc_for_promotion; collector->alloc_for_promotion_par = alloc_for_promotion_par; collector->prepare_to_space = prepare_to_space; collector->clear_fragments = clear_fragments; collector->build_fragments_get_exclude_head = build_fragments_get_exclude_head; collector->build_fragments_release_exclude_head = build_fragments_release_exclude_head; collector->build_fragments_finish = build_fragments_finish; collector->init_nursery = init_nursery; fill_serial_ops (&collector->serial_ops); #ifndef DISABLE_SGEN_MAJOR_MARKSWEEP_CONC fill_serial_with_concurrent_major_ops (&collector->serial_ops_with_concurrent_major); fill_parallel_ops (&collector->parallel_ops); fill_parallel_with_concurrent_major_ops (&collector->parallel_ops_with_concurrent_major); /* * The nursery worker context is created first so it will have priority over * concurrent mark and concurrent sweep. */ if (parallel) sgen_workers_create_context (GENERATION_NURSERY, mono_cpu_count ()); #endif } #endif
-1
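The FIXME in alloc_for_promotion_par above notes that sgen_total_promoted_size is bumped with a plain, non-atomic add, so the promoted-size statistic can drift when several workers promote objects in parallel. A language-neutral illustration of that trade-off, sketched in C# with counter names of our own:

using System;
using System.Threading;
using System.Threading.Tasks;

public static class PromotedSizeDemo
{
    private static long _racyTotal;   // plain +=, like sgen_total_promoted_size
    private static long _atomicTotal; // what a precise statistic would cost

    public static void Main()
    {
        Parallel.For(0, 4, _ =>
        {
            for (int i = 0; i < 1_000_000; i++)
            {
                _racyTotal += 16;                      // racy: concurrent updates can be lost
                Interlocked.Add(ref _atomicTotal, 16); // never loses an update, but serializes writers
            }
        });

        Console.WriteLine($"racy total:   {_racyTotal}");
        Console.WriteLine($"atomic total: {_atomicTotal}"); // always 4 * 1,000,000 * 16
    }
}

The collector tolerates the imprecision because, as the comment says, the value is only a statistic.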
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/tests/JIT/Methodical/divrem/rem/decimalrem_cs_ro.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="decimalrem.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="decimalrem.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/libraries/System.IO/tests/StreamWriter/StreamWriter.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.IO.Tests { public partial class WriteTests { [Fact] public void Synchronized_NewObject() { using (Stream str = CreateStream()) { StreamWriter writer = new StreamWriter(str); TextWriter synced = TextWriter.Synchronized(writer); Assert.NotEqual(writer, synced); writer.Write("app"); synced.Write("pad"); writer.Flush(); synced.Flush(); str.Position = 0; StreamReader reader = new StreamReader(str); Assert.Equal("apppad", reader.ReadLine()); } } private class TestFormatWriter : StreamWriter { public int WriteCalls { get; private set; } public int WriteLineCalls { get; private set; } public TestFormatWriter(Stream stream) : base(stream) { } public override void Write(string value) { WriteCalls++; base.Write(value); } public override void WriteLine(string value) { WriteLineCalls++; base.WriteLine(value); } } [Fact] public void FormatOverloadsCallWrite() { TestFormatWriter writer = new TestFormatWriter(new MemoryStream()); writer.Write("{0}", "Zero"); Assert.Equal(1, writer.WriteCalls); writer.Write("{0}{1}", "Zero", "One"); Assert.Equal(2, writer.WriteCalls); writer.Write("{0}{1}{2}", "Zero", "One", "Two"); Assert.Equal(3, writer.WriteCalls); writer.Write("{0}{1}{2}{3}", "Zero", "One", "Two", "Three"); Assert.Equal(4, writer.WriteCalls); writer.Write("{0}{1}{2}{3}{4}", "Zero", "One", "Two", "Three", "Four"); Assert.Equal(5, writer.WriteCalls); writer.WriteLine("{0}", "Zero"); Assert.Equal(1, writer.WriteLineCalls); writer.WriteLine("{0}{1}", "Zero", "One"); Assert.Equal(2, writer.WriteLineCalls); writer.WriteLine("{0}{1}{2}", "Zero", "One", "Two"); Assert.Equal(3, writer.WriteLineCalls); writer.WriteLine("{0}{1}{2}{3}", "Zero", "One", "Two", "Three"); Assert.Equal(4, writer.WriteLineCalls); writer.WriteLine("{0}{1}{2}{3}{4}", "Zero", "One", "Two", "Three", "Four"); Assert.Equal(5, writer.WriteLineCalls); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.IO.Tests { public partial class WriteTests { [Fact] public void Synchronized_NewObject() { using (Stream str = CreateStream()) { StreamWriter writer = new StreamWriter(str); TextWriter synced = TextWriter.Synchronized(writer); Assert.NotEqual(writer, synced); writer.Write("app"); synced.Write("pad"); writer.Flush(); synced.Flush(); str.Position = 0; StreamReader reader = new StreamReader(str); Assert.Equal("apppad", reader.ReadLine()); } } private class TestFormatWriter : StreamWriter { public int WriteCalls { get; private set; } public int WriteLineCalls { get; private set; } public TestFormatWriter(Stream stream) : base(stream) { } public override void Write(string value) { WriteCalls++; base.Write(value); } public override void WriteLine(string value) { WriteLineCalls++; base.WriteLine(value); } } [Fact] public void FormatOverloadsCallWrite() { TestFormatWriter writer = new TestFormatWriter(new MemoryStream()); writer.Write("{0}", "Zero"); Assert.Equal(1, writer.WriteCalls); writer.Write("{0}{1}", "Zero", "One"); Assert.Equal(2, writer.WriteCalls); writer.Write("{0}{1}{2}", "Zero", "One", "Two"); Assert.Equal(3, writer.WriteCalls); writer.Write("{0}{1}{2}{3}", "Zero", "One", "Two", "Three"); Assert.Equal(4, writer.WriteCalls); writer.Write("{0}{1}{2}{3}{4}", "Zero", "One", "Two", "Three", "Four"); Assert.Equal(5, writer.WriteCalls); writer.WriteLine("{0}", "Zero"); Assert.Equal(1, writer.WriteLineCalls); writer.WriteLine("{0}{1}", "Zero", "One"); Assert.Equal(2, writer.WriteLineCalls); writer.WriteLine("{0}{1}{2}", "Zero", "One", "Two"); Assert.Equal(3, writer.WriteLineCalls); writer.WriteLine("{0}{1}{2}{3}", "Zero", "One", "Two", "Three"); Assert.Equal(4, writer.WriteLineCalls); writer.WriteLine("{0}{1}{2}{3}{4}", "Zero", "One", "Two", "Three", "Four"); Assert.Equal(5, writer.WriteLineCalls); } } }
-1
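The StreamWriter tests above cover two behaviors: TextWriter.Synchronized returns a distinct, lock-guarded wrapper over a StreamWriter, and the composite-format Write/WriteLine overloads all funnel into the string overloads. A small sketch of the synchronized wrapper in use (the payload text is ours):

using System;
using System.IO;
using System.Threading.Tasks;

public static class SyncWriterDemo
{
    public static void Main()
    {
        using var stream = new MemoryStream();
        using var writer = new StreamWriter(stream);

        // The wrapper takes a lock around every call, so concurrent writers
        // cannot interleave characters inside a single WriteLine.
        TextWriter synced = TextWriter.Synchronized(writer);

        Parallel.For(0, 4, i => synced.WriteLine($"line from worker {i}"));

        synced.Flush();
        stream.Position = 0;
        Console.WriteLine(new StreamReader(stream).ReadToEnd());
    }
}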
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/ShiftRightArithmetic.Vector64.Int32.1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ShiftRightArithmetic_Vector64_Int32_1() { var test = new ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1 { private struct DataTable { private byte[] inArray; private byte[] outArray; private GCHandle inHandle; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray, Int32[] outArray, int alignment) { int sizeOfinArray = inArray.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray = new byte[alignment * 2]; this.outArray = 
new byte[alignment * 2]; this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<Int32, byte>(ref inArray[0]), (uint)sizeOfinArray); } public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int32> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1 testClass) { var result = AdvSimd.ShiftRightArithmetic(_fld, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1 testClass) { fixed (Vector64<Int32>* pFld = &_fld) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector64((Int32*)(pFld)), 1 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly byte Imm = 1; private static Int32[] _data = new Int32[Op1ElementCount]; private static Vector64<Int32> _clsVar; private Vector64<Int32> _fld; private DataTable _dataTable; static ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); } public ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ShiftRightArithmetic( Unsafe.Read<Vector64<Int32>>(_dataTable.inArrayPtr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector64((Int32*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightArithmetic), new Type[] { typeof(Vector64<Int32>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Int32>>(_dataTable.inArrayPtr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightArithmetic), new Type[] { typeof(Vector64<Int32>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Int32*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ShiftRightArithmetic( _clsVar, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Int32>* pClsVar = &_clsVar) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector64((Int32*)(pClsVar)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector64<Int32>>(_dataTable.inArrayPtr); var result = AdvSimd.ShiftRightArithmetic(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = AdvSimd.LoadVector64((Int32*)(_dataTable.inArrayPtr)); var result = AdvSimd.ShiftRightArithmetic(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1(); var result = AdvSimd.ShiftRightArithmetic(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1(); fixed (Vector64<Int32>* pFld = &test._fld) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector64((Int32*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ShiftRightArithmetic(_fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } public 
void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Int32>* pFld = &_fld) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector64((Int32*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightArithmetic(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector64((Int32*)(&test._fld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Int32> firstOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(Int32[] firstOp, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ShiftRightArithmetic(firstOp[i], Imm) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftRightArithmetic)}<Int32>(Vector64<Int32>, 1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ShiftRightArithmetic_Vector64_Int32_1() { var test = new ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1 { private struct DataTable { private byte[] inArray; private byte[] outArray; private GCHandle inHandle; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray, Int32[] outArray, int alignment) { int sizeOfinArray = inArray.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray = new byte[alignment * 2]; this.outArray = 
new byte[alignment * 2]; this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<Int32, byte>(ref inArray[0]), (uint)sizeOfinArray); } public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int32> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1 testClass) { var result = AdvSimd.ShiftRightArithmetic(_fld, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1 testClass) { fixed (Vector64<Int32>* pFld = &_fld) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector64((Int32*)(pFld)), 1 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly byte Imm = 1; private static Int32[] _data = new Int32[Op1ElementCount]; private static Vector64<Int32> _clsVar; private Vector64<Int32> _fld; private DataTable _dataTable; static ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); } public ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld), ref Unsafe.As<Int32, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ShiftRightArithmetic( Unsafe.Read<Vector64<Int32>>(_dataTable.inArrayPtr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector64((Int32*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightArithmetic), new Type[] { typeof(Vector64<Int32>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Int32>>(_dataTable.inArrayPtr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightArithmetic), new Type[] { typeof(Vector64<Int32>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Int32*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ShiftRightArithmetic( _clsVar, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Int32>* pClsVar = &_clsVar) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector64((Int32*)(pClsVar)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector64<Int32>>(_dataTable.inArrayPtr); var result = AdvSimd.ShiftRightArithmetic(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = AdvSimd.LoadVector64((Int32*)(_dataTable.inArrayPtr)); var result = AdvSimd.ShiftRightArithmetic(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1(); var result = AdvSimd.ShiftRightArithmetic(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new ImmUnaryOpTest__ShiftRightArithmetic_Vector64_Int32_1(); fixed (Vector64<Int32>* pFld = &test._fld) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector64((Int32*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ShiftRightArithmetic(_fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } public 
void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Int32>* pFld = &_fld) { var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector64((Int32*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightArithmetic(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightArithmetic( AdvSimd.LoadVector64((Int32*)(&test._fld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Int32> firstOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray = new Int32[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(Int32[] firstOp, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ShiftRightArithmetic(firstOp[i], Imm) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftRightArithmetic)}<Int32>(Vector64<Int32>, 1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
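The record above exercises AdvSimd.ShiftRightArithmetic(Vector64&lt;Int32&gt;, 1) and validates each lane of the vector result against the scalar Helpers.ShiftRightArithmetic from the test library. Below is a minimal, self-contained C# sketch of that per-lane check, assuming the helper mirrors C#'s signed right shift for Int32 (an arithmetic shift that replicates the sign bit); the class and method names are illustrative stand-ins, not part of the dataset or of the test library.

// Illustrative sketch of the per-lane comparison performed by ValidateResult above.
// ShiftRightArithmeticScalar is a hypothetical stand-in for Helpers.ShiftRightArithmetic.
using System;

static class ShiftRightArithmeticSketch
{
    // For signed Int32, C#'s >> is an arithmetic shift: the sign bit is replicated.
    static int ShiftRightArithmeticScalar(int value, byte shift) => value >> shift;

    static void Main()
    {
        const byte imm = 1;                              // immediate used by the test record
        int[] firstOp = { -8, -1, 6, int.MinValue };     // sample lane values
        foreach (int lane in firstOp)
        {
            int expected = ShiftRightArithmeticScalar(lane, imm);
            Console.WriteLine($"{lane} >> {imm} = {expected}");
        }
        // Prints: -8 >> 1 = -4, -1 >> 1 = -1, 6 >> 1 = 3, -2147483648 >> 1 = -1073741824
    }
}

The scenarios in the record pass only when every lane of the AdvSimd result equals this scalar reference value.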
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/libraries/System.Runtime.InteropServices/tests/LibraryImportGenerator.UnitTests/LibraryImportGenerator.Unit.Tests.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)</TargetFramework> <IsPackable>false</IsPackable> <LangVersion>Preview</LangVersion> <Nullable>enable</Nullable> <TestRunRequiresLiveRefPack>true</TestRunRequiresLiveRefPack> </PropertyGroup> <ItemGroup> <Compile Include="$(CommonTestPath)SourceGenerators\LiveReferencePack.cs" Link="Common\SourceGenerators\LiveReferencePack.cs" /> </ItemGroup> <ItemGroup> <PackageReference Include="Microsoft.CodeAnalysis.CSharp.Workspaces" Version="$(MicrosoftCodeAnalysisVersion)" /> <PackageReference Include="Microsoft.CodeAnalysis.CSharp.Analyzer.Testing.XUnit" Version="$(CompilerPlatformTestingVersion)" /> <PackageReference Include="Microsoft.CodeAnalysis.CSharp.CodeFix.Testing.XUnit" Version="$(CompilerPlatformTestingVersion)" /> <PackageReference Include="coverlet.collector" Version="$(CoverletCollectorVersion)"> <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets> <PrivateAssets>all</PrivateAssets> </PackageReference> </ItemGroup> <ItemGroup> <ProjectReference Include="..\Ancillary.Interop\Ancillary.Interop.csproj" /> <ProjectReference Include="..\..\gen\LibraryImportGenerator\LibraryImportGenerator.csproj" /> </ItemGroup> <ItemGroup> <None Include="$(RepoRoot)/NuGet.config" Link="NuGet.config" CopyToOutputDirectory="PreserveNewest" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)</TargetFramework> <IsPackable>false</IsPackable> <LangVersion>Preview</LangVersion> <Nullable>enable</Nullable> <TestRunRequiresLiveRefPack>true</TestRunRequiresLiveRefPack> </PropertyGroup> <ItemGroup> <Compile Include="$(CommonTestPath)SourceGenerators\LiveReferencePack.cs" Link="Common\SourceGenerators\LiveReferencePack.cs" /> </ItemGroup> <ItemGroup> <PackageReference Include="Microsoft.CodeAnalysis.CSharp.Workspaces" Version="$(MicrosoftCodeAnalysisVersion)" /> <PackageReference Include="Microsoft.CodeAnalysis.CSharp.Analyzer.Testing.XUnit" Version="$(CompilerPlatformTestingVersion)" /> <PackageReference Include="Microsoft.CodeAnalysis.CSharp.CodeFix.Testing.XUnit" Version="$(CompilerPlatformTestingVersion)" /> <PackageReference Include="coverlet.collector" Version="$(CoverletCollectorVersion)"> <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets> <PrivateAssets>all</PrivateAssets> </PackageReference> </ItemGroup> <ItemGroup> <ProjectReference Include="..\Ancillary.Interop\Ancillary.Interop.csproj" /> <ProjectReference Include="..\..\gen\LibraryImportGenerator\LibraryImportGenerator.csproj" /> </ItemGroup> <ItemGroup> <None Include="$(RepoRoot)/NuGet.config" Link="NuGet.config" CopyToOutputDirectory="PreserveNewest" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/tests/JIT/Methodical/eh/leaves/labelbeforefinally_il_r.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="labelbeforefinally.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\..\..\common\eh_common.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="labelbeforefinally.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\..\..\common\eh_common.csproj" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/coreclr/jit/decomposelongs.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX DecomposeLongs XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX*/ // // This file contains code to decompose 64-bit LONG operations on 32-bit platforms // into multiple single-register operations so individual register usage and requirements // are explicit for LSRA. The rationale behind this is to avoid adding code complexity // downstream caused by the introduction of handling longs as special cases, // especially in LSRA. // // Long decomposition happens on a statement immediately prior to more general // purpose lowering. // #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #ifndef TARGET_64BIT // DecomposeLongs is only used on 32-bit platforms #include "decomposelongs.h" //------------------------------------------------------------------------ // DecomposeLongs::PrepareForDecomposition: // Do one-time preparation required for LONG decomposition. Namely, // promote long variables to multi-register structs. // // Arguments: // None // // Return Value: // None. // void DecomposeLongs::PrepareForDecomposition() { PromoteLongVars(); } //------------------------------------------------------------------------ // DecomposeLongs::DecomposeBlock: // Do LONG decomposition on all the nodes in the given block. This must // be done before lowering the block, as decomposition can insert // additional nodes. // // Arguments: // block - the block to process // // Return Value: // None. // void DecomposeLongs::DecomposeBlock(BasicBlock* block) { assert(block == m_compiler->compCurBB); // compCurBB must already be set. assert(block->isEmpty() || block->IsLIR()); m_range = &LIR::AsRange(block); DecomposeRangeHelper(); } //------------------------------------------------------------------------ // DecomposeLongs::DecomposeRange: // Do LONG decomposition on all the nodes in the given range. This must // be done before inserting a range of un-decomposed IR into a block // that has already been decomposed. // // Arguments: // compiler - The compiler context. // range - The range to decompose. // // Return Value: // None. // void DecomposeLongs::DecomposeRange(Compiler* compiler, LIR::Range& range) { assert(compiler != nullptr); DecomposeLongs decomposer(compiler); decomposer.m_range = &range; decomposer.DecomposeRangeHelper(); } //------------------------------------------------------------------------ // DecomposeLongs::DecomposeRangeHelper: // Decompiose each node in the current range. // // Decomposition is done as an execution-order walk. Decomposition of // a particular node can create new nodes that need to be further // decomposed at higher levels. That is, decomposition "bubbles up" // through dataflow. // void DecomposeLongs::DecomposeRangeHelper() { assert(m_range != nullptr); GenTree* node = Range().FirstNode(); while (node != nullptr) { node = DecomposeNode(node); } assert(Range().CheckLIR(m_compiler, true)); } //------------------------------------------------------------------------ // DecomposeNode: Decompose long-type trees into lower and upper halves. // // Arguments: // tree - the tree that will, if needed, be decomposed. 
// // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeNode(GenTree* tree) { // Handle the case where we are implicitly using the lower half of a long lclVar. if ((tree->TypeGet() == TYP_INT) && tree->OperIsLocal()) { LclVarDsc* varDsc = m_compiler->lvaGetDesc(tree->AsLclVarCommon()); if (varTypeIsLong(varDsc) && varDsc->lvPromoted) { #ifdef DEBUG if (m_compiler->verbose) { printf("Changing implicit reference to lo half of long lclVar to an explicit reference of its promoted " "half:\n"); m_compiler->gtDispTreeRange(Range(), tree); } #endif // DEBUG unsigned loVarNum = varDsc->lvFieldLclStart; tree->AsLclVarCommon()->SetLclNum(loVarNum); return tree->gtNext; } } if (tree->TypeGet() != TYP_LONG) { return tree->gtNext; } #ifdef DEBUG if (m_compiler->verbose) { printf("Decomposing TYP_LONG tree. BEFORE:\n"); m_compiler->gtDispTreeRange(Range(), tree); } #endif // DEBUG LIR::Use use; if (!Range().TryGetUse(tree, &use)) { use = LIR::Use::GetDummyUse(Range(), tree); } GenTree* nextNode = nullptr; switch (tree->OperGet()) { case GT_LCL_VAR: nextNode = DecomposeLclVar(use); break; case GT_LCL_FLD: nextNode = DecomposeLclFld(use); break; case GT_STORE_LCL_VAR: nextNode = DecomposeStoreLclVar(use); break; case GT_CAST: nextNode = DecomposeCast(use); break; case GT_CNS_LNG: nextNode = DecomposeCnsLng(use); break; case GT_CALL: nextNode = DecomposeCall(use); break; case GT_RETURN: assert(tree->AsOp()->gtOp1->OperGet() == GT_LONG); break; case GT_STOREIND: nextNode = DecomposeStoreInd(use); break; case GT_STORE_LCL_FLD: nextNode = DecomposeStoreLclFld(use); break; case GT_IND: nextNode = DecomposeInd(use); break; case GT_NOT: nextNode = DecomposeNot(use); break; case GT_NEG: nextNode = DecomposeNeg(use); break; // Binary operators. Those that require different computation for upper and lower half are // handled by the use of GetHiOper(). case GT_ADD: case GT_SUB: case GT_OR: case GT_XOR: case GT_AND: nextNode = DecomposeArith(use); break; case GT_MUL: nextNode = DecomposeMul(use); break; case GT_UMOD: nextNode = DecomposeUMod(use); break; case GT_LSH: case GT_RSH: case GT_RSZ: nextNode = DecomposeShift(use); break; case GT_ROL: case GT_ROR: nextNode = DecomposeRotate(use); break; #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: nextNode = DecomposeHWIntrinsic(use); break; #endif // FEATURE_HW_INTRINSICS case GT_LOCKADD: case GT_XORR: case GT_XAND: case GT_XADD: case GT_XCHG: case GT_CMPXCHG: NYI("Interlocked operations on TYP_LONG"); break; default: { JITDUMP("Illegal TYP_LONG node %s in Decomposition.", GenTree::OpName(tree->OperGet())); assert(!"Illegal TYP_LONG node in Decomposition."); break; } } // If we replaced the argument to a GT_FIELD_LIST element with a GT_LONG node, split that field list // element into two elements: one for each half of the GT_LONG. if ((use.Def()->OperGet() == GT_LONG) && !use.IsDummyUse() && (use.User()->OperGet() == GT_FIELD_LIST)) { DecomposeFieldList(use.User()->AsFieldList(), use.Def()->AsOp()); } #ifdef DEBUG if (m_compiler->verbose) { // NOTE: st_lcl_var doesn't dump properly afterwards. printf("Decomposing TYP_LONG tree. AFTER:\n"); m_compiler->gtDispTreeRange(Range(), use.Def()); } #endif // When casting from a decomposed long to a smaller integer we can discard the high part. 
if (m_compiler->opts.OptimizationEnabled() && !use.IsDummyUse() && use.User()->OperIs(GT_CAST) && use.User()->TypeIs(TYP_INT) && use.Def()->OperIs(GT_LONG)) { nextNode = OptimizeCastFromDecomposedLong(use.User()->AsCast(), nextNode); } return nextNode; } //------------------------------------------------------------------------ // FinalizeDecomposition: A helper function to finalize LONG decomposition by // taking the resulting two halves of the decomposition, and tie them together // with a new GT_LONG node that will replace the original node. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // loResult - the decomposed low part // hiResult - the decomposed high part // insertResultAfter - the node that the GT_LONG should be inserted after // // Return Value: // The next node to process. // GenTree* DecomposeLongs::FinalizeDecomposition(LIR::Use& use, GenTree* loResult, GenTree* hiResult, GenTree* insertResultAfter) { assert(use.IsInitialized()); assert(loResult != nullptr); assert(hiResult != nullptr); assert(Range().Contains(loResult)); assert(Range().Contains(hiResult)); GenTree* gtLong = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loResult, hiResult); if (use.IsDummyUse()) { gtLong->SetUnusedValue(); } loResult->ClearUnusedValue(); hiResult->ClearUnusedValue(); Range().InsertAfter(insertResultAfter, gtLong); use.ReplaceWith(gtLong); return gtLong->gtNext; } //------------------------------------------------------------------------ // DecomposeLclVar: Decompose GT_LCL_VAR. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeLclVar(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_LCL_VAR); GenTree* tree = use.Def(); unsigned varNum = tree->AsLclVarCommon()->GetLclNum(); LclVarDsc* varDsc = m_compiler->lvaGetDesc(varNum); GenTree* loResult = tree; loResult->gtType = TYP_INT; GenTree* hiResult = m_compiler->gtNewLclLNode(varNum, TYP_INT); Range().InsertAfter(loResult, hiResult); if (varDsc->lvPromoted) { assert(varDsc->lvFieldCnt == 2); unsigned loVarNum = varDsc->lvFieldLclStart; unsigned hiVarNum = loVarNum + 1; loResult->AsLclVarCommon()->SetLclNum(loVarNum); hiResult->AsLclVarCommon()->SetLclNum(hiVarNum); } else { m_compiler->lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField)); loResult->SetOper(GT_LCL_FLD); loResult->AsLclFld()->SetLclOffs(0); loResult->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField()); hiResult->SetOper(GT_LCL_FLD); hiResult->AsLclFld()->SetLclOffs(4); hiResult->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField()); } return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeLclFld: Decompose GT_LCL_FLD. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. 
// GenTree* DecomposeLongs::DecomposeLclFld(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_LCL_FLD); GenTree* tree = use.Def(); GenTreeLclFld* loResult = tree->AsLclFld(); loResult->gtType = TYP_INT; GenTree* hiResult = m_compiler->gtNewLclFldNode(loResult->GetLclNum(), TYP_INT, loResult->GetLclOffs() + 4); Range().InsertAfter(loResult, hiResult); return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeStoreLclVar: Decompose GT_STORE_LCL_VAR. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeStoreLclVar(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_STORE_LCL_VAR); GenTree* tree = use.Def(); GenTree* rhs = tree->gtGetOp1(); if (rhs->OperIs(GT_CALL) || (rhs->OperIs(GT_MUL_LONG) && (rhs->gtFlags & GTF_MUL_64RSLT) != 0)) { // GT_CALLs are not decomposed, so will not be converted to GT_LONG // GT_STORE_LCL_VAR = GT_CALL are handled in genMultiRegCallStoreToLocal // GT_MULs are not decomposed, so will not be converted to GT_LONG return tree->gtNext; } noway_assert(rhs->OperGet() == GT_LONG); const LclVarDsc* varDsc = m_compiler->lvaGetDesc(tree->AsLclVarCommon()); if (!varDsc->lvPromoted) { // We cannot decompose a st.lclVar that is not promoted because doing so // changes its liveness semantics. For example, consider the following // decomposition of a st.lclVar into two st.lclFlds: // // Before: // // /--* t0 int // +--* t1 int // t2 = * gt_long long // // /--* t2 long // * st.lclVar long V0 // // After: // /--* t0 int // * st.lclFld int V0 [+0] // // /--* t1 int // * st.lclFld int V0 [+4] // // Before decomposition, the `st.lclVar` is a simple def of `V0`. After // decomposition, each `st.lclFld` is a partial def of `V0`. This partial // def is treated as both a use and a def of the appropriate lclVar. This // difference will affect any situation in which the liveness of a variable // at a def matters (e.g. dead store elimination, live-in sets, etc.). As // a result, we leave these stores as-is and generate the decomposed store // in the code generator. // // NOTE: this does extend the lifetime of the low half of the `GT_LONG` // node as compared to the decomposed form. If we start doing more code // motion in the backend, this may cause some CQ issues and some sort of // decomposition could be beneficial. return tree->gtNext; } assert(varDsc->lvFieldCnt == 2); GenTreeOp* value = rhs->AsOp(); Range().Remove(value); const unsigned loVarNum = varDsc->lvFieldLclStart; GenTree* loStore = tree; loStore->AsLclVarCommon()->SetLclNum(loVarNum); loStore->AsOp()->gtOp1 = value->gtOp1; loStore->gtType = TYP_INT; const unsigned hiVarNum = loVarNum + 1; GenTree* hiStore = m_compiler->gtNewLclLNode(hiVarNum, TYP_INT); hiStore->SetOper(GT_STORE_LCL_VAR); hiStore->AsOp()->gtOp1 = value->gtOp2; hiStore->gtFlags |= GTF_VAR_DEF; Range().InsertAfter(tree, hiStore); return hiStore->gtNext; } //------------------------------------------------------------------------ // DecomposeStoreLclFld: Decompose GT_STORE_LCL_FLD. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. 
// GenTree* DecomposeLongs::DecomposeStoreLclFld(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_STORE_LCL_FLD); GenTreeLclFld* store = use.Def()->AsLclFld(); GenTreeOp* value = store->gtOp1->AsOp(); assert(value->OperGet() == GT_LONG); Range().Remove(value); // The original store node will be repurposed to store the low half of the GT_LONG. GenTreeLclFld* loStore = store; loStore->gtOp1 = value->gtOp1; loStore->gtType = TYP_INT; loStore->gtFlags |= GTF_VAR_USEASG; // Create the store for the upper half of the GT_LONG and insert it after the low store. GenTreeLclFld* hiStore = m_compiler->gtNewLclFldNode(loStore->GetLclNum(), TYP_INT, loStore->GetLclOffs() + 4); hiStore->SetOper(GT_STORE_LCL_FLD); hiStore->gtOp1 = value->gtOp2; hiStore->gtFlags |= (GTF_VAR_DEF | GTF_VAR_USEASG); Range().InsertAfter(loStore, hiStore); return hiStore->gtNext; } //------------------------------------------------------------------------ // DecomposeCast: Decompose GT_CAST. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeCast(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_CAST); GenTree* cast = use.Def()->AsCast(); GenTree* loResult = nullptr; GenTree* hiResult = nullptr; var_types srcType = cast->CastFromType(); var_types dstType = cast->CastToType(); if ((cast->gtFlags & GTF_UNSIGNED) != 0) { srcType = varTypeToUnsigned(srcType); } bool skipDecomposition = false; if (varTypeIsLong(srcType)) { if (cast->gtOverflow() && (varTypeIsUnsigned(srcType) != varTypeIsUnsigned(dstType))) { GenTree* srcOp = cast->gtGetOp1(); noway_assert(srcOp->OperGet() == GT_LONG); GenTree* loSrcOp = srcOp->gtGetOp1(); GenTree* hiSrcOp = srcOp->gtGetOp2(); // // When casting between long types an overflow check is needed only if the types // have different signedness. In both cases (long->ulong and ulong->long) we only // need to check if the high part is negative or not. Use the existing cast node // to perform a int->uint cast of the high part to take advantage of the overflow // check provided by codegen. // const bool signExtend = (cast->gtFlags & GTF_UNSIGNED) == 0; loResult = EnsureIntSized(loSrcOp, signExtend); hiResult = cast; hiResult->gtType = TYP_INT; hiResult->AsCast()->gtCastType = TYP_UINT; hiResult->gtFlags &= ~GTF_UNSIGNED; hiResult->AsOp()->gtOp1 = hiSrcOp; Range().Remove(srcOp); } else { NYI("Unimplemented long->long no-op cast decomposition"); } } else if (varTypeIsIntegralOrI(srcType)) { if (cast->gtOverflow() && !varTypeIsUnsigned(srcType) && varTypeIsUnsigned(dstType)) { // // An overflow check is needed only when casting from a signed type to ulong. // Change the cast type to uint to take advantage of the overflow check provided // by codegen and then zero extend the resulting uint to ulong. // loResult = cast; loResult->AsCast()->gtCastType = TYP_UINT; loResult->gtType = TYP_INT; hiResult = m_compiler->gtNewZeroConNode(TYP_INT); Range().InsertAfter(loResult, hiResult); } else { if (!use.IsDummyUse() && (use.User()->OperGet() == GT_MUL)) { // // This int->long cast is used by a GT_MUL that will be transformed by DecomposeMul into a // GT_MUL_LONG and as a result the high operand produced by the cast will become dead. // Skip cast decomposition so DecomposeMul doesn't need to bother with dead code removal, // especially in the case of sign extending casts that also introduce new lclvars. 
// assert(use.User()->Is64RsltMul()); skipDecomposition = true; } else if (varTypeIsUnsigned(srcType)) { const bool signExtend = (cast->gtFlags & GTF_UNSIGNED) == 0; loResult = EnsureIntSized(cast->gtGetOp1(), signExtend); hiResult = m_compiler->gtNewZeroConNode(TYP_INT); Range().InsertAfter(cast, hiResult); Range().Remove(cast); } else { LIR::Use src(Range(), &(cast->AsOp()->gtOp1), cast); unsigned lclNum = src.ReplaceWithLclVar(m_compiler); loResult = src.Def(); GenTree* loCopy = m_compiler->gtNewLclvNode(lclNum, TYP_INT); GenTree* shiftBy = m_compiler->gtNewIconNode(31, TYP_INT); hiResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, loCopy, shiftBy); Range().InsertAfter(cast, loCopy, shiftBy, hiResult); Range().Remove(cast); } } } else { NYI("Unimplemented cast decomposition"); } if (skipDecomposition) { return cast->gtNext; } return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeCnsLng: Decompose GT_CNS_LNG. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeCnsLng(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_CNS_LNG); GenTree* tree = use.Def(); INT32 loVal = tree->AsLngCon()->LoVal(); INT32 hiVal = tree->AsLngCon()->HiVal(); GenTree* loResult = tree; loResult->BashToConst(loVal); GenTree* hiResult = m_compiler->gtNewIconNode(hiVal); Range().InsertAfter(loResult, hiResult); return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeFieldList: Decompose GT_FIELD_LIST. // // Arguments: // fieldList - the GT_FIELD_LIST node that uses the given GT_LONG node. // longNode - the node to decompose // // Return Value: // The next node to process. // // Notes: // Split a LONG field list element into two elements: one for each half of the GT_LONG. // GenTree* DecomposeLongs::DecomposeFieldList(GenTreeFieldList* fieldList, GenTreeOp* longNode) { assert(longNode->OperGet() == GT_LONG); GenTreeFieldList::Use* loUse = nullptr; for (GenTreeFieldList::Use& use : fieldList->Uses()) { if (use.GetNode() == longNode) { loUse = &use; break; } } assert(loUse != nullptr); Range().Remove(longNode); loUse->SetNode(longNode->gtGetOp1()); loUse->SetType(TYP_INT); fieldList->InsertFieldLIR(m_compiler, loUse, longNode->gtGetOp2(), loUse->GetOffset() + 4, TYP_INT); return fieldList->gtNext; } //------------------------------------------------------------------------ // DecomposeCall: Decompose GT_CALL. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeCall(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_CALL); // We only need to force var = call() if the call's result is used. return StoreNodeToVar(use); } //------------------------------------------------------------------------ // DecomposeStoreInd: Decompose GT_STOREIND. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. 
// GenTree* DecomposeLongs::DecomposeStoreInd(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_STOREIND); GenTree* tree = use.Def(); assert(tree->AsOp()->gtOp2->OperGet() == GT_LONG); // Example input (address expression omitted): // // t51 = const int 0x37C05E7D // t154 = const int 0x2A0A3C80 // / --* t51 int // + --* t154 int // t155 = *gt_long long // / --* t52 byref // + --* t155 long // * storeIndir long GenTree* gtLong = tree->AsOp()->gtOp2; // Save address to a temp. It is used in storeIndLow and storeIndHigh trees. LIR::Use address(Range(), &tree->AsOp()->gtOp1, tree); address.ReplaceWithLclVar(m_compiler); JITDUMP("[DecomposeStoreInd]: Saving address tree to a temp var:\n"); DISPTREERANGE(Range(), address.Def()); if (!gtLong->AsOp()->gtOp1->OperIsLeaf()) { LIR::Use op1(Range(), &gtLong->AsOp()->gtOp1, gtLong); op1.ReplaceWithLclVar(m_compiler); JITDUMP("[DecomposeStoreInd]: Saving low data tree to a temp var:\n"); DISPTREERANGE(Range(), op1.Def()); } if (!gtLong->AsOp()->gtOp2->OperIsLeaf()) { LIR::Use op2(Range(), &gtLong->AsOp()->gtOp2, gtLong); op2.ReplaceWithLclVar(m_compiler); JITDUMP("[DecomposeStoreInd]: Saving high data tree to a temp var:\n"); DISPTREERANGE(Range(), op2.Def()); } GenTree* addrBase = tree->AsOp()->gtOp1; GenTree* dataHigh = gtLong->AsOp()->gtOp2; GenTree* dataLow = gtLong->AsOp()->gtOp1; GenTree* storeIndLow = tree; Range().Remove(gtLong); Range().Remove(dataHigh); storeIndLow->AsOp()->gtOp2 = dataLow; storeIndLow->gtType = TYP_INT; GenTree* addrBaseHigh = new (m_compiler, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum()); GenTree* addrHigh = new (m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT)); GenTree* storeIndHigh = new (m_compiler, GT_STOREIND) GenTreeStoreInd(TYP_INT, addrHigh, dataHigh); storeIndHigh->gtFlags = (storeIndLow->gtFlags & (GTF_ALL_EFFECT | GTF_LIVENESS_MASK)); Range().InsertAfter(storeIndLow, dataHigh, addrBaseHigh, addrHigh, storeIndHigh); return storeIndHigh; // Example final output: // // /--* t52 byref // * st.lclVar byref V07 rat0 // t158 = lclVar byref V07 rat0 // t51 = const int 0x37C05E7D // /--* t158 byref // +--* t51 int // * storeIndir int // t154 = const int 0x2A0A3C80 // t159 = lclVar byref V07 rat0 // /--* t159 byref // t160 = * lea(b + 4) ref // /--* t154 int // +--* t160 ref // * storeIndir int } //------------------------------------------------------------------------ // DecomposeInd: Decompose GT_IND. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeInd(LIR::Use& use) { GenTree* indLow = use.Def(); LIR::Use address(Range(), &indLow->AsOp()->gtOp1, indLow); address.ReplaceWithLclVar(m_compiler); JITDUMP("[DecomposeInd]: Saving addr tree to a temp var:\n"); DISPTREERANGE(Range(), address.Def()); // Change the type of lower ind. 
indLow->gtType = TYP_INT; // Create tree of ind(addr+4) GenTree* addrBase = indLow->gtGetOp1(); GenTree* addrBaseHigh = new (m_compiler, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum()); GenTree* addrHigh = new (m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT)); GenTree* indHigh = new (m_compiler, GT_IND) GenTreeIndir(GT_IND, TYP_INT, addrHigh, nullptr); indHigh->gtFlags |= (indLow->gtFlags & (GTF_GLOB_REF | GTF_EXCEPT | GTF_IND_FLAGS)); Range().InsertAfter(indLow, addrBaseHigh, addrHigh, indHigh); return FinalizeDecomposition(use, indLow, indHigh, indHigh); } //------------------------------------------------------------------------ // DecomposeNot: Decompose GT_NOT. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeNot(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_NOT); GenTree* tree = use.Def(); GenTree* gtLong = tree->gtGetOp1(); noway_assert(gtLong->OperGet() == GT_LONG); GenTree* loOp1 = gtLong->gtGetOp1(); GenTree* hiOp1 = gtLong->gtGetOp2(); Range().Remove(gtLong); GenTree* loResult = tree; loResult->gtType = TYP_INT; loResult->AsOp()->gtOp1 = loOp1; GenTree* hiResult = new (m_compiler, GT_NOT) GenTreeOp(GT_NOT, TYP_INT, hiOp1, nullptr); Range().InsertAfter(loResult, hiResult); return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeNeg: Decompose GT_NEG. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeNeg(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_NEG); GenTree* tree = use.Def(); GenTree* gtLong = tree->gtGetOp1(); noway_assert(gtLong->OperGet() == GT_LONG); GenTree* loOp1 = gtLong->gtGetOp1(); GenTree* hiOp1 = gtLong->gtGetOp2(); Range().Remove(gtLong); GenTree* loResult = tree; loResult->gtType = TYP_INT; loResult->AsOp()->gtOp1 = loOp1; GenTree* zero = m_compiler->gtNewZeroConNode(TYP_INT); #if defined(TARGET_X86) GenTree* hiAdjust = m_compiler->gtNewOperNode(GT_ADD_HI, TYP_INT, hiOp1, zero); GenTree* hiResult = m_compiler->gtNewOperNode(GT_NEG, TYP_INT, hiAdjust); Range().InsertAfter(loResult, zero, hiAdjust, hiResult); loResult->gtFlags |= GTF_SET_FLAGS; hiAdjust->gtFlags |= GTF_USE_FLAGS; #elif defined(TARGET_ARM) // We tend to use "movs" to load zero to a register, and that sets the flags, so put the // zero before the loResult, which is setting the flags needed by GT_SUB_HI. GenTree* hiResult = m_compiler->gtNewOperNode(GT_SUB_HI, TYP_INT, zero, hiOp1); Range().InsertBefore(loResult, zero); Range().InsertAfter(loResult, hiResult); loResult->gtFlags |= GTF_SET_FLAGS; hiResult->gtFlags |= GTF_USE_FLAGS; #endif return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeArith: Decompose GT_ADD, GT_SUB, GT_OR, GT_XOR, GT_AND. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. 
// GenTree* DecomposeLongs::DecomposeArith(LIR::Use& use) { assert(use.IsInitialized()); GenTree* tree = use.Def(); genTreeOps oper = tree->OperGet(); assert((oper == GT_ADD) || (oper == GT_SUB) || (oper == GT_OR) || (oper == GT_XOR) || (oper == GT_AND)); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); // Both operands must have already been decomposed into GT_LONG operators. noway_assert((op1->OperGet() == GT_LONG) && (op2->OperGet() == GT_LONG)); // Capture the lo and hi halves of op1 and op2. GenTree* loOp1 = op1->gtGetOp1(); GenTree* hiOp1 = op1->gtGetOp2(); GenTree* loOp2 = op2->gtGetOp1(); GenTree* hiOp2 = op2->gtGetOp2(); // Now, remove op1 and op2 from the node list. Range().Remove(op1); Range().Remove(op2); // We will reuse "tree" for the loResult, which will now be of TYP_INT, and its operands // will be the lo halves of op1 from above. GenTree* loResult = tree; loResult->SetOper(GetLoOper(oper)); loResult->gtType = TYP_INT; loResult->AsOp()->gtOp1 = loOp1; loResult->AsOp()->gtOp2 = loOp2; GenTree* hiResult = new (m_compiler, oper) GenTreeOp(GetHiOper(oper), TYP_INT, hiOp1, hiOp2); Range().InsertAfter(loResult, hiResult); if ((oper == GT_ADD) || (oper == GT_SUB)) { loResult->gtFlags |= GTF_SET_FLAGS; hiResult->gtFlags |= GTF_USE_FLAGS; if ((loResult->gtFlags & GTF_OVERFLOW) != 0) { hiResult->gtFlags |= GTF_OVERFLOW | GTF_EXCEPT; loResult->gtFlags &= ~(GTF_OVERFLOW | GTF_EXCEPT); } if (loResult->gtFlags & GTF_UNSIGNED) { hiResult->gtFlags |= GTF_UNSIGNED; } } return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeShift: Decompose GT_LSH, GT_RSH, GT_RSZ. For shift nodes being shifted // by a constant int, we can inspect the shift amount and decompose to the appropriate // node types, generating a shl/shld pattern for GT_LSH, a shrd/shr pattern for GT_RSZ, // and a shrd/sar pattern for GT_SHR for most shift amounts. Shifting by 0, >= 32 and // >= 64 are special cased to produce better code patterns. // // For all other shift nodes, we need to use the shift helper functions, so we here convert // the shift into a helper call by pulling its arguments out of linear order and making // them the args to a call, then replacing the original node with the new call. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use) { assert(use.IsInitialized()); GenTree* shift = use.Def(); GenTree* gtLong = shift->gtGetOp1(); GenTree* loOp1 = gtLong->gtGetOp1(); GenTree* hiOp1 = gtLong->gtGetOp2(); GenTree* shiftByOp = shift->gtGetOp2(); genTreeOps oper = shift->OperGet(); genTreeOps shiftByOper = shiftByOp->OperGet(); // tLo = ... // ... // tHi = ... // ... // tLong = long tLo, tHi // ... // tShiftAmount = ... // ... // tShift = shift tLong, tShiftAmount assert((oper == GT_LSH) || (oper == GT_RSH) || (oper == GT_RSZ)); // If we are shifting by a constant int, we do not want to use a helper, instead, we decompose. if (shiftByOper == GT_CNS_INT) { // Reduce count modulo 64 to match behavior found in the shift helpers, // Compiler::gtFoldExpr and ValueNumStore::EvalOpIntegral. unsigned int count = shiftByOp->AsIntCon()->gtIconVal & 0x3F; Range().Remove(shiftByOp); if (count == 0) { GenTree* next = shift->gtNext; // Remove shift and don't do anything else. 
if (shift->IsUnusedValue()) { gtLong->SetUnusedValue(); } Range().Remove(shift); use.ReplaceWith(gtLong); return next; } GenTree* loResult; GenTree* hiResult; GenTree* insertAfter; switch (oper) { case GT_LSH: { if (count < 32) { // For shifts of < 32 bits, we transform the code to: // // tLo = ... // st.lclVar vLo, tLo // ... // tHi = ... // ... // tShiftLo = lsh vLo, tShiftAmountLo // tShitHiLong = long vLo, tHi // tShiftHi = lsh_hi tShiftHiLong, tShiftAmountHi // // This will produce: // // reg1 = lo // shl lo, shift // shld hi, reg1, shift loOp1 = RepresentOpAsLocalVar(loOp1, gtLong, &gtLong->AsOp()->gtOp1); unsigned loOp1LclNum = loOp1->AsLclVarCommon()->GetLclNum(); Range().Remove(loOp1); GenTree* shiftByHi = m_compiler->gtNewIconNode(count, TYP_INT); GenTree* shiftByLo = m_compiler->gtNewIconNode(count, TYP_INT); loResult = m_compiler->gtNewOperNode(GT_LSH, TYP_INT, loOp1, shiftByLo); // Create a GT_LONG that contains loCopy and hiOp1. This will be used in codegen to // generate the shld instruction GenTree* loCopy = m_compiler->gtNewLclvNode(loOp1LclNum, TYP_INT); GenTree* hiOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loCopy, hiOp1); hiResult = m_compiler->gtNewOperNode(GT_LSH_HI, TYP_INT, hiOp, shiftByHi); Range().InsertBefore(shift, loOp1, shiftByLo, loResult); Range().InsertBefore(shift, loCopy, hiOp, shiftByHi, hiResult); insertAfter = hiResult; } else { assert(count >= 32 && count < 64); // Since we're left shifting at least 32 bits, we can remove the hi part of the shifted value iff // it has no side effects. // // TODO-CQ: we could go perform this removal transitively (i.e. iteratively remove everything that // feeds the hi operand while there are no side effects) if ((hiOp1->gtFlags & GTF_ALL_EFFECT) == 0) { Range().Remove(hiOp1, true); } else { hiOp1->SetUnusedValue(); } if (count == 32) { // Move loOp1 into hiResult (shift of 32 bits is just a mov of lo to hi) // We need to make sure that we save lo to a temp variable so that we don't overwrite lo // before saving it to hi in the case that we are doing an inplace shift. I.e.: // x = x << 32 LIR::Use loOp1Use(Range(), &gtLong->AsOp()->gtOp1, gtLong); loOp1Use.ReplaceWithLclVar(m_compiler); hiResult = loOp1Use.Def(); } else { assert(count > 32 && count < 64); // Move loOp1 into hiResult, do a GT_LSH with count - 32. // We will compute hiResult before loResult in this case, so we don't need to store lo to a // temp GenTree* shiftBy = m_compiler->gtNewIconNode(count - 32, TYP_INT); hiResult = m_compiler->gtNewOperNode(oper, TYP_INT, loOp1, shiftBy); Range().InsertBefore(shift, shiftBy, hiResult); } // Zero out loResult (shift of >= 32 bits shifts all lo bits to hiResult) loResult = m_compiler->gtNewZeroConNode(TYP_INT); Range().InsertBefore(shift, loResult); insertAfter = loResult; } } break; case GT_RSZ: { if (count < 32) { // Hi is a GT_RSZ, lo is a GT_RSH_LO. Will produce: // reg1 = hi // shrd lo, reg1, shift // shr hi, shift hiOp1 = RepresentOpAsLocalVar(hiOp1, gtLong, &gtLong->AsOp()->gtOp2); unsigned hiOp1LclNum = hiOp1->AsLclVarCommon()->GetLclNum(); GenTree* hiCopy = m_compiler->gtNewLclvNode(hiOp1LclNum, TYP_INT); GenTree* shiftByHi = m_compiler->gtNewIconNode(count, TYP_INT); GenTree* shiftByLo = m_compiler->gtNewIconNode(count, TYP_INT); hiResult = m_compiler->gtNewOperNode(GT_RSZ, TYP_INT, hiOp1, shiftByHi); // Create a GT_LONG that contains loOp1 and hiCopy. 
This will be used in codegen to // generate the shrd instruction GenTree* loOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loOp1, hiCopy); loResult = m_compiler->gtNewOperNode(GT_RSH_LO, TYP_INT, loOp, shiftByLo); Range().InsertBefore(shift, hiCopy, loOp); Range().InsertBefore(shift, shiftByLo, loResult); Range().InsertBefore(shift, shiftByHi, hiResult); } else { assert(count >= 32 && count < 64); // Since we're right shifting at least 32 bits, we can remove the lo part of the shifted value iff // it has no side effects. // // TODO-CQ: we could go perform this removal transitively (i.e. iteratively remove everything that // feeds the lo operand while there are no side effects) if ((loOp1->gtFlags & (GTF_ALL_EFFECT | GTF_SET_FLAGS)) == 0) { Range().Remove(loOp1, true); } else { loOp1->SetUnusedValue(); } if (count == 32) { // Move hiOp1 into loResult. loResult = hiOp1; } else { assert(count > 32 && count < 64); // Move hiOp1 into loResult, do a GT_RSZ with count - 32. GenTree* shiftBy = m_compiler->gtNewIconNode(count - 32, TYP_INT); loResult = m_compiler->gtNewOperNode(oper, TYP_INT, hiOp1, shiftBy); Range().InsertBefore(shift, shiftBy, loResult); } // Zero out hi hiResult = m_compiler->gtNewZeroConNode(TYP_INT); Range().InsertBefore(shift, hiResult); } insertAfter = hiResult; } break; case GT_RSH: { hiOp1 = RepresentOpAsLocalVar(hiOp1, gtLong, &gtLong->AsOp()->gtOp2); unsigned hiOp1LclNum = hiOp1->AsLclVarCommon()->GetLclNum(); GenTree* hiCopy = m_compiler->gtNewLclvNode(hiOp1LclNum, TYP_INT); Range().Remove(hiOp1); if (count < 32) { // Hi is a GT_RSH, lo is a GT_RSH_LO. Will produce: // reg1 = hi // shrd lo, reg1, shift // sar hi, shift GenTree* shiftByHi = m_compiler->gtNewIconNode(count, TYP_INT); GenTree* shiftByLo = m_compiler->gtNewIconNode(count, TYP_INT); hiResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, hiOp1, shiftByHi); // Create a GT_LONG that contains loOp1 and hiCopy. This will be used in codegen to // generate the shrd instruction GenTree* loOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loOp1, hiCopy); loResult = m_compiler->gtNewOperNode(GT_RSH_LO, TYP_INT, loOp, shiftByLo); Range().InsertBefore(shift, hiCopy, loOp); Range().InsertBefore(shift, shiftByLo, loResult); Range().InsertBefore(shift, shiftByHi, hiOp1, hiResult); } else { assert(count >= 32 && count < 64); // Since we're right shifting at least 32 bits, we can remove the lo part of the shifted value iff // it has no side effects. // // TODO-CQ: we could go perform this removal transitively (i.e. iteratively remove everything that // feeds the lo operand while there are no side effects) if ((loOp1->gtFlags & (GTF_ALL_EFFECT | GTF_SET_FLAGS)) == 0) { Range().Remove(loOp1, true); } else { loOp1->SetUnusedValue(); } if (count == 32) { // Move hiOp1 into loResult. loResult = hiOp1; Range().InsertBefore(shift, loResult); } else { assert(count > 32 && count < 64); // Move hiOp1 into loResult, do a GT_RSH with count - 32. 
GenTree* shiftBy = m_compiler->gtNewIconNode(count - 32, TYP_INT); loResult = m_compiler->gtNewOperNode(oper, TYP_INT, hiOp1, shiftBy); Range().InsertBefore(shift, hiOp1, shiftBy, loResult); } // Propagate sign bit in hiResult GenTree* shiftBy = m_compiler->gtNewIconNode(31, TYP_INT); hiResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, hiCopy, shiftBy); Range().InsertBefore(shift, shiftBy, hiCopy, hiResult); } insertAfter = hiResult; } break; default: unreached(); } // Remove shift from Range Range().Remove(gtLong); Range().Remove(shift); return FinalizeDecomposition(use, loResult, hiResult, insertAfter); } else { // Because calls must be created as HIR and lowered to LIR, we need to dump // any LIR temps into lclVars before using them as arguments. shiftByOp = RepresentOpAsLocalVar(shiftByOp, shift, &shift->AsOp()->gtOp2); loOp1 = RepresentOpAsLocalVar(loOp1, gtLong, &gtLong->AsOp()->gtOp1); hiOp1 = RepresentOpAsLocalVar(hiOp1, gtLong, &gtLong->AsOp()->gtOp2); Range().Remove(shiftByOp); Range().Remove(gtLong); Range().Remove(loOp1); Range().Remove(hiOp1); unsigned helper; switch (oper) { case GT_LSH: helper = CORINFO_HELP_LLSH; break; case GT_RSH: helper = CORINFO_HELP_LRSH; break; case GT_RSZ: helper = CORINFO_HELP_LRSZ; break; default: unreached(); } GenTreeCall::Use* argList = m_compiler->gtNewCallArgs(loOp1, hiOp1, shiftByOp); GenTreeCall* call = m_compiler->gtNewHelperCallNode(helper, TYP_LONG, argList); call->gtFlags |= shift->gtFlags & GTF_ALL_EFFECT; if (shift->IsUnusedValue()) { call->SetUnusedValue(); } call = m_compiler->fgMorphArgs(call); Range().InsertAfter(shift, LIR::SeqTree(m_compiler, call)); Range().Remove(shift); use.ReplaceWith(call); return call; } } //------------------------------------------------------------------------ // DecomposeRotate: Decompose GT_ROL and GT_ROR with constant shift amounts. We can // inspect the rotate amount and decompose to the appropriate node types, generating // a shld/shld pattern for GT_ROL, a shrd/shrd pattern for GT_ROR, for most rotate // amounts. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeRotate(LIR::Use& use) { GenTree* tree = use.Def(); GenTree* gtLong = tree->gtGetOp1(); GenTree* rotateByOp = tree->gtGetOp2(); genTreeOps oper = tree->OperGet(); assert((oper == GT_ROL) || (oper == GT_ROR)); assert(rotateByOp->IsCnsIntOrI()); // For longs, we need to change rols into two GT_LSH_HIs and rors into two GT_RSH_LOs // so we will get: // // shld lo, hi, rotateAmount // shld hi, loCopy, rotateAmount // // or: // // shrd lo, hi, rotateAmount // shrd hi, loCopy, rotateAmount if (oper == GT_ROL) { oper = GT_LSH_HI; } else { oper = GT_RSH_LO; } unsigned count = (unsigned)rotateByOp->AsIntCon()->gtIconVal; Range().Remove(rotateByOp); // Make sure the rotate amount is between 0 and 63. assert((count < 64) && (count != 0)); GenTree* loResult; GenTree* hiResult; if (count == 32) { // If the rotate amount is 32, then swap hi and lo LIR::Use loOp1Use(Range(), &gtLong->AsOp()->gtOp1, gtLong); loOp1Use.ReplaceWithLclVar(m_compiler); LIR::Use hiOp1Use(Range(), &gtLong->AsOp()->gtOp2, gtLong); hiOp1Use.ReplaceWithLclVar(m_compiler); hiResult = loOp1Use.Def(); loResult = hiOp1Use.Def(); gtLong->AsOp()->gtOp1 = loResult; gtLong->AsOp()->gtOp2 = hiResult; if (tree->IsUnusedValue()) { gtLong->SetUnusedValue(); } GenTree* next = tree->gtNext; // Remove tree and don't do anything else. 
Range().Remove(tree); use.ReplaceWith(gtLong); return next; } else { GenTree* loOp1; GenTree* hiOp1; if (count > 32) { // If count > 32, we swap hi and lo, and subtract 32 from count hiOp1 = gtLong->gtGetOp1(); loOp1 = gtLong->gtGetOp2(); loOp1 = RepresentOpAsLocalVar(loOp1, gtLong, &gtLong->AsOp()->gtOp2); hiOp1 = RepresentOpAsLocalVar(hiOp1, gtLong, &gtLong->AsOp()->gtOp1); count -= 32; } else { loOp1 = gtLong->gtGetOp1(); hiOp1 = gtLong->gtGetOp2(); loOp1 = RepresentOpAsLocalVar(loOp1, gtLong, &gtLong->AsOp()->gtOp1); hiOp1 = RepresentOpAsLocalVar(hiOp1, gtLong, &gtLong->AsOp()->gtOp2); } Range().Remove(gtLong); unsigned loOp1LclNum = loOp1->AsLclVarCommon()->GetLclNum(); unsigned hiOp1LclNum = hiOp1->AsLclVarCommon()->GetLclNum(); Range().Remove(loOp1); Range().Remove(hiOp1); GenTree* rotateByHi = m_compiler->gtNewIconNode(count, TYP_INT); GenTree* rotateByLo = m_compiler->gtNewIconNode(count, TYP_INT); // Create a GT_LONG that contains loOp1 and hiCopy. This will be used in codegen to // generate the shld instruction GenTree* hiCopy = m_compiler->gtNewLclvNode(hiOp1LclNum, TYP_INT); GenTree* loOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, hiCopy, loOp1); loResult = m_compiler->gtNewOperNode(oper, TYP_INT, loOp, rotateByLo); // Create a GT_LONG that contains loCopy and hiOp1. This will be used in codegen to // generate the shld instruction GenTree* loCopy = m_compiler->gtNewLclvNode(loOp1LclNum, TYP_INT); GenTree* hiOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loCopy, hiOp1); hiResult = m_compiler->gtNewOperNode(oper, TYP_INT, hiOp, rotateByHi); Range().InsertBefore(tree, hiCopy, loOp1, loOp); Range().InsertBefore(tree, rotateByLo, loResult); Range().InsertBefore(tree, loCopy, hiOp1, hiOp); Range().InsertBefore(tree, rotateByHi, hiResult); Range().Remove(tree); return FinalizeDecomposition(use, loResult, hiResult, hiResult); } } //------------------------------------------------------------------------ // DecomposeMul: Decompose GT_MUL. The only GT_MULs that make it to decompose are // those with the GTF_MUL_64RSLT flag set. These muls result in a mul instruction that // returns its result in two registers like GT_CALLs do. Additionally, these muls are // guaranteed to be in the form long = (long)int * (long)int. Therefore, to decompose // these nodes, we convert them into GT_MUL_LONGs, undo the cast from int to long by // stripping out the lo ops, and force them into the form var = mul, as we do for // GT_CALLs. In codegen, we then produce a mul instruction that produces the result // in edx:eax on x86 or in any two chosen by RA registers on arm32, and store those // registers on the stack in genStoreLongLclVar. // // All other GT_MULs have been converted to helper calls in morph.cpp // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeMul(LIR::Use& use) { assert(use.IsInitialized()); GenTree* tree = use.Def(); assert(tree->OperIs(GT_MUL)); assert(tree->Is64RsltMul()); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); assert(op1->TypeIs(TYP_LONG) && op2->TypeIs(TYP_LONG)); // We expect the first operand to be an int->long cast. // DecomposeCast specifically ignores such casts when they are used by GT_MULs. assert(op1->OperIs(GT_CAST)); // The second operand can be a cast or a constant. 
if (!op2->OperIs(GT_CAST)) { assert(op2->OperIs(GT_LONG)); assert(op2->gtGetOp1()->IsIntegralConst()); assert(op2->gtGetOp2()->IsIntegralConst()); Range().Remove(op2->gtGetOp2()); } Range().Remove(op1); Range().Remove(op2); tree->AsOp()->gtOp1 = op1->gtGetOp1(); tree->AsOp()->gtOp2 = op2->gtGetOp1(); tree->SetOper(GT_MUL_LONG); return StoreNodeToVar(use); } //------------------------------------------------------------------------ // DecomposeUMod: Decompose GT_UMOD. The only GT_UMODs that make it to decompose // are guaranteed to be an unsigned long mod with op2 which is a cast to long from // a constant int whose value is between 2 and 0x3fffffff. All other GT_UMODs are // morphed into helper calls. These GT_UMODs will actually return an int value in // RDX. In decompose, we make the lo operation a TYP_INT GT_UMOD, with op2 as the // original lo half and op1 as a GT_LONG. We make the hi part 0, so we end up with: // // GT_UMOD[TYP_INT] ( GT_LONG [TYP_LONG] (loOp1, hiOp1), loOp2 [TYP_INT] ) // // With the expectation that we will generate: // // EDX = hiOp1 // EAX = loOp1 // reg = loOp2 // idiv reg // EDX is the remainder, and result of GT_UMOD // mov hiReg = 0 // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeUMod(LIR::Use& use) { assert(use.IsInitialized()); GenTree* tree = use.Def(); genTreeOps oper = tree->OperGet(); assert(oper == GT_UMOD); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); assert(op1->OperGet() == GT_LONG); assert(op2->OperGet() == GT_LONG); GenTree* loOp2 = op2->gtGetOp1(); GenTree* hiOp2 = op2->gtGetOp2(); assert(loOp2->OperGet() == GT_CNS_INT); assert(hiOp2->OperGet() == GT_CNS_INT); assert((loOp2->AsIntCon()->gtIconVal >= 2) && (loOp2->AsIntCon()->gtIconVal <= 0x3fffffff)); assert(hiOp2->AsIntCon()->gtIconVal == 0); // Get rid of op2's hi part. We don't need it. Range().Remove(hiOp2); Range().Remove(op2); // Lo part is the GT_UMOD GenTree* loResult = tree; loResult->AsOp()->gtOp2 = loOp2; loResult->gtType = TYP_INT; // Set the high part to 0 GenTree* hiResult = m_compiler->gtNewZeroConNode(TYP_INT); Range().InsertAfter(loResult, hiResult); return FinalizeDecomposition(use, loResult, hiResult, hiResult); } #ifdef FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // DecomposeHWIntrinsic: Decompose GT_HWINTRINSIC. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeHWIntrinsic(LIR::Use& use) { GenTree* tree = use.Def(); assert(tree->OperIs(GT_HWINTRINSIC)); GenTreeHWIntrinsic* hwintrinsicTree = tree->AsHWIntrinsic(); switch (hwintrinsicTree->GetHWIntrinsicId()) { case NI_Vector128_GetElement: case NI_Vector256_GetElement: return DecomposeHWIntrinsicGetElement(use, hwintrinsicTree); default: noway_assert(!"unexpected GT_HWINTRINSIC node in long decomposition"); break; } return nullptr; } //------------------------------------------------------------------------ // DecomposeHWIntrinsicGetElement: Decompose GT_HWINTRINSIC -- NI_Vector*_GetElement. // // Decompose a get[i] node on Vector*<long>. 
For: // // GT_HWINTRINSIC{GetElement}[long](simd_var, index) // // create: // // tmp_simd_var = simd_var // tmp_index = index // loResult = GT_HWINTRINSIC{GetElement}[int](tmp_simd_var, tmp_index * 2) // hiResult = GT_HWINTRINSIC{GetElement}[int](tmp_simd_var, tmp_index * 2 + 1) // return: GT_LONG(loResult, hiResult) // // This isn't optimal codegen, since NI_Vector*_GetElement sometimes requires // temps that could be shared, for example. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // node - the hwintrinsic node to decompose // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeHWIntrinsicGetElement(LIR::Use& use, GenTreeHWIntrinsic* node) { assert(node == use.Def()); assert(varTypeIsLong(node)); assert((node->GetHWIntrinsicId() == NI_Vector128_GetElement) || (node->GetHWIntrinsicId() == NI_Vector256_GetElement)); GenTree* op1 = node->Op(1); GenTree* op2 = node->Op(2); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); assert(varTypeIsLong(simdBaseType)); assert(varTypeIsSIMD(op1->TypeGet())); assert(op2->TypeIs(TYP_INT)); bool indexIsConst = op2->OperIsConst(); ssize_t index = 0; if (indexIsConst) { index = op2->AsIntCon()->IconValue(); } GenTree* simdTmpVar = RepresentOpAsLocalVar(op1, node, &node->Op(1)); unsigned simdTmpVarNum = simdTmpVar->AsLclVarCommon()->GetLclNum(); JITDUMP("[DecomposeHWIntrinsicGetElement]: Saving op1 tree to a temp var:\n"); DISPTREERANGE(Range(), simdTmpVar); Range().Remove(simdTmpVar); op1 = node->Op(1); GenTree* indexTmpVar = nullptr; unsigned indexTmpVarNum = 0; if (!indexIsConst) { indexTmpVar = RepresentOpAsLocalVar(op2, node, &node->Op(2)); indexTmpVarNum = indexTmpVar->AsLclVarCommon()->GetLclNum(); JITDUMP("[DecomposeHWIntrinsicGetElement]: Saving op2 tree to a temp var:\n"); DISPTREERANGE(Range(), indexTmpVar); Range().Remove(indexTmpVar); op2 = node->Op(2); } // Create: // loResult = GT_HWINTRINSIC{GetElement}[int](tmp_simd_var, index * 2) GenTree* simdTmpVar1 = simdTmpVar; GenTree* indexTimesTwo1; if (indexIsConst) { // Reuse the existing index constant node. 
indexTimesTwo1 = op2; Range().Remove(indexTimesTwo1); indexTimesTwo1->AsIntCon()->SetIconValue(index * 2); Range().InsertBefore(node, simdTmpVar1, indexTimesTwo1); } else { GenTree* indexTmpVar1 = indexTmpVar; GenTree* two1 = m_compiler->gtNewIconNode(2, TYP_INT); indexTimesTwo1 = m_compiler->gtNewOperNode(GT_MUL, TYP_INT, indexTmpVar1, two1); Range().InsertBefore(node, simdTmpVar1, indexTmpVar1, two1, indexTimesTwo1); } GenTree* loResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, simdTmpVar1, indexTimesTwo1, node->GetHWIntrinsicId(), CORINFO_TYPE_INT, simdSize); Range().InsertBefore(node, loResult); // Create: // hiResult = GT_HWINTRINSIC{GetElement}[int](tmp_simd_var, index * 2 + 1) GenTree* simdTmpVar2 = m_compiler->gtNewLclLNode(simdTmpVarNum, op1->TypeGet()); GenTree* indexTimesTwoPlusOne; if (indexIsConst) { indexTimesTwoPlusOne = m_compiler->gtNewIconNode(index * 2 + 1, TYP_INT); Range().InsertBefore(node, simdTmpVar2, indexTimesTwoPlusOne); } else { GenTree* indexTmpVar2 = m_compiler->gtNewLclLNode(indexTmpVarNum, TYP_INT); GenTree* two2 = m_compiler->gtNewIconNode(2, TYP_INT); GenTree* indexTimesTwo2 = m_compiler->gtNewOperNode(GT_MUL, TYP_INT, indexTmpVar2, two2); GenTree* one = m_compiler->gtNewIconNode(1, TYP_INT); indexTimesTwoPlusOne = m_compiler->gtNewOperNode(GT_ADD, TYP_INT, indexTimesTwo2, one); Range().InsertBefore(node, simdTmpVar2, indexTmpVar2, two2, indexTimesTwo2); Range().InsertBefore(node, one, indexTimesTwoPlusOne); } GenTree* hiResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, simdTmpVar2, indexTimesTwoPlusOne, node->GetHWIntrinsicId(), CORINFO_TYPE_INT, simdSize); Range().InsertBefore(node, hiResult); // Done with the original tree; remove it. Range().Remove(node); return FinalizeDecomposition(use, loResult, hiResult, hiResult); } #endif // FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // OptimizeCastFromDecomposedLong: optimizes a cast from GT_LONG by discarding // the high part of the source and, if the cast is to INT, the cast node itself. // Accounts for side effects and marks nodes unused as neccessary. // // Only accepts casts to integer types that are not long. // Does not optimize checked casts. // // Arguments: // cast - the cast tree that has a GT_LONG node as its operand. // nextNode - the next candidate for decomposition. // // Return Value: // The next node to process in DecomposeRange: "nextNode->gtNext" if // "cast == nextNode", simply "nextNode" otherwise. // // Notes: // Because "nextNode" usually is "cast", and this method may remove "cast" // from the linear order, it needs to return the updated "nextNode". Instead // of receiving it as an argument, it could assume that "nextNode" is always // "cast->CastOp()->gtNext", but not making that assumption seems better. // GenTree* DecomposeLongs::OptimizeCastFromDecomposedLong(GenTreeCast* cast, GenTree* nextNode) { GenTreeOp* src = cast->CastOp()->AsOp(); var_types dstType = cast->CastToType(); assert(src->OperIs(GT_LONG)); assert(genActualType(dstType) == TYP_INT); if (cast->gtOverflow()) { return nextNode; } GenTree* loSrc = src->gtGetOp1(); GenTree* hiSrc = src->gtGetOp2(); JITDUMP("Optimizing a truncating cast [%06u] from decomposed LONG [%06u]\n", cast->gtTreeID, src->gtTreeID); INDEBUG(GenTree* treeToDisplay = cast); // TODO-CQ: we could go perform this removal transitively. // See also identical code in shift decomposition. 
if ((hiSrc->gtFlags & (GTF_ALL_EFFECT | GTF_SET_FLAGS)) == 0) { JITDUMP("Removing the HI part of [%06u] and marking its operands unused:\n", src->gtTreeID); DISPNODE(hiSrc); Range().Remove(hiSrc, /* markOperandsUnused */ true); } else { JITDUMP("The HI part of [%06u] has side effects, marking it unused\n", src->gtTreeID); hiSrc->SetUnusedValue(); } JITDUMP("Removing the LONG source:\n"); DISPNODE(src); Range().Remove(src); if (varTypeIsSmall(dstType)) { JITDUMP("Cast is to a small type, keeping it, the new source is [%06u]\n", loSrc->gtTreeID); cast->CastOp() = loSrc; } else { LIR::Use useOfCast; if (Range().TryGetUse(cast, &useOfCast)) { useOfCast.ReplaceWith(loSrc); } else { loSrc->SetUnusedValue(); } if (nextNode == cast) { nextNode = nextNode->gtNext; } INDEBUG(treeToDisplay = loSrc); JITDUMP("Removing the cast:\n"); DISPNODE(cast); Range().Remove(cast); } JITDUMP("Final result:\n") DISPTREERANGE(Range(), treeToDisplay); return nextNode; } //------------------------------------------------------------------------ // StoreNodeToVar: Check if the user is a STORE_LCL_VAR, and if it isn't, // store the node to a var. Then decompose the new LclVar. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::StoreNodeToVar(LIR::Use& use) { if (use.IsDummyUse()) return use.Def()->gtNext; GenTree* tree = use.Def(); GenTree* user = use.User(); if (user->OperGet() == GT_STORE_LCL_VAR) { // If parent is already a STORE_LCL_VAR, we can skip it if // it is already marked as lvIsMultiRegRet. unsigned varNum = user->AsLclVarCommon()->GetLclNum(); if (m_compiler->lvaTable[varNum].lvIsMultiRegRet) { return tree->gtNext; } else if (!m_compiler->lvaTable[varNum].lvPromoted) { // If var wasn't promoted, we can just set lvIsMultiRegRet. m_compiler->lvaTable[varNum].lvIsMultiRegRet = true; return tree->gtNext; } } // Otherwise, we need to force var = call() unsigned varNum = use.ReplaceWithLclVar(m_compiler); m_compiler->lvaTable[varNum].lvIsMultiRegRet = true; // Decompose the new LclVar use return DecomposeLclVar(use); } //------------------------------------------------------------------------ // Check is op already local var, if not store it to local. // // Arguments: // op - GenTree* to represent as local variable // user - user of op // edge - edge from user to op // // Return Value: // op represented as local var // GenTree* DecomposeLongs::RepresentOpAsLocalVar(GenTree* op, GenTree* user, GenTree** edge) { if (op->OperGet() == GT_LCL_VAR) { return op; } else { LIR::Use opUse(Range(), edge, user); opUse.ReplaceWithLclVar(m_compiler); return *edge; } } //------------------------------------------------------------------------ // DecomposeLongs::EnsureIntSized: // Checks to see if the given node produces an int-sized value and // performs the appropriate widening if it does not. // // Arguments: // node - The node that may need to be widened. // signExtend - True if the value should be sign-extended; false if it // should be zero-extended. // // Return Value: // The node that produces the widened value. 
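// Illustrative example (added commentary, not produced by this function verbatim): a small-typed
// node chosen as one half of a decomposed long is widened via the cast that the body below creates,
//
//     GenTree* widened = m_compiler->gtNewCastNode(TYP_INT, node, /* fromUnsigned */ true, node->TypeGet());
//
// i.e. for a TYP_UBYTE source a CAST(int <- ubyte) that zero-extends it to a full 32-bit value;
// small signed types get the sign-extending form instead (fromUnsigned == false).
//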
GenTree* DecomposeLongs::EnsureIntSized(GenTree* node, bool signExtend) { assert(node != nullptr); if (!varTypeIsSmall(node)) { assert(genTypeSize(node) == genTypeSize(TYP_INT)); return node; } if (node->OperIs(GT_LCL_VAR) && !m_compiler->lvaTable[node->AsLclVarCommon()->GetLclNum()].lvNormalizeOnLoad()) { node->gtType = TYP_INT; return node; } GenTree* const cast = m_compiler->gtNewCastNode(TYP_INT, node, !signExtend, node->TypeGet()); Range().InsertAfter(node, cast); return cast; } //------------------------------------------------------------------------ // GetHiOper: Convert arithmetic operator to "high half" operator of decomposed node. // // Arguments: // oper - operator to map // // Return Value: // mapped operator // // static genTreeOps DecomposeLongs::GetHiOper(genTreeOps oper) { switch (oper) { case GT_ADD: return GT_ADD_HI; break; case GT_SUB: return GT_SUB_HI; break; case GT_OR: return GT_OR; break; case GT_AND: return GT_AND; break; case GT_XOR: return GT_XOR; break; default: assert(!"GetHiOper called for invalid oper"); return GT_NONE; } } //------------------------------------------------------------------------ // GetLoOper: Convert arithmetic operator to "low half" operator of decomposed node. // // Arguments: // oper - operator to map // // Return Value: // mapped operator // // static genTreeOps DecomposeLongs::GetLoOper(genTreeOps oper) { switch (oper) { case GT_ADD: return GT_ADD_LO; break; case GT_SUB: return GT_SUB_LO; break; case GT_OR: return GT_OR; break; case GT_AND: return GT_AND; break; case GT_XOR: return GT_XOR; break; default: assert(!"GetLoOper called for invalid oper"); return GT_NONE; } } //------------------------------------------------------------------------ // PromoteLongVars: "Struct promote" all register candidate longs as if they are structs of two ints. // // Arguments: // None. // // Return Value: // None. // void DecomposeLongs::PromoteLongVars() { if (!m_compiler->compEnregLocals()) { return; } // The lvaTable might grow as we grab temps. Make a local copy here. unsigned startLvaCount = m_compiler->lvaCount; for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++) { LclVarDsc* varDsc = m_compiler->lvaGetDesc(lclNum); if (!varTypeIsLong(varDsc)) { continue; } if (varDsc->lvDoNotEnregister) { continue; } if (varDsc->lvRefCnt() == 0) { continue; } if (varDsc->lvIsStructField) { continue; } if (m_compiler->fgNoStructPromotion) { continue; } if (m_compiler->fgNoStructParamPromotion && varDsc->lvIsParam) { continue; } assert(!varDsc->lvIsMultiRegArgOrRet()); varDsc->lvFieldCnt = 2; varDsc->lvFieldLclStart = m_compiler->lvaCount; varDsc->lvPromoted = true; varDsc->lvContainsHoles = false; JITDUMP("\nPromoting long local V%02u:", lclNum); bool isParam = varDsc->lvIsParam; for (unsigned index = 0; index < 2; ++index) { // Grab the temp for the field local. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG char buf[200]; sprintf_s(buf, sizeof(buf), "%s V%02u.%s (fldOffset=0x%x)", "field", lclNum, index == 0 ? "lo" : "hi", index * 4); // We need to copy 'buf' as lvaGrabTemp() below caches a copy to its argument. size_t len = strlen(buf) + 1; char* bufp = m_compiler->getAllocator(CMK_DebugOnly).allocate<char>(len); strcpy_s(bufp, len, buf); #endif unsigned varNum = m_compiler->lvaGrabTemp(false DEBUGARG(bufp)); // Lifetime of field locals might span multiple BBs, so // they are long lifetime temps. 
            LclVarDsc* fieldVarDsc       = m_compiler->lvaGetDesc(varNum);
            fieldVarDsc->lvType          = TYP_INT;
            fieldVarDsc->lvExactSize     = genTypeSize(TYP_INT);
            fieldVarDsc->lvIsStructField = true;
            fieldVarDsc->lvFldOffset     = (unsigned char)(index * genTypeSize(TYP_INT));
            fieldVarDsc->lvFldOrdinal    = (unsigned char)index;
            fieldVarDsc->lvParentLcl     = lclNum;

            // Currently we do not support enregistering incoming promoted aggregates with more than one field.
            if (isParam)
            {
                fieldVarDsc->lvIsParam = true;
                m_compiler->lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LongParamField));
            }
        }
    }

#ifdef DEBUG
    if (m_compiler->verbose)
    {
        printf("\nlvaTable after PromoteLongVars\n");
        m_compiler->lvaTableDump();
    }
#endif // DEBUG
}

#endif // !defined(TARGET_64BIT)
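// ---------------------------------------------------------------------------------
// Illustrative sketch (added commentary, not part of the JIT sources): on a 32-bit
// target a TYP_LONG value is split into a low half at offset 0 and a high half at
// offset 4 (little-endian), and long arithmetic is chained through the carry flag,
// which is what the GT_ADD_LO / GT_ADD_HI pair produced by DecomposeArith models.
// The helper name below is hypothetical and only demonstrates the intended semantics:
//
//     #include <cstdint>
//
//     static uint64_t AddViaHalves(uint64_t x, uint64_t y)
//     {
//         uint32_t loX = (uint32_t)x, hiX = (uint32_t)(x >> 32);
//         uint32_t loY = (uint32_t)y, hiY = (uint32_t)(y >> 32);
//
//         uint32_t lo    = loX + loY;          // GT_ADD_LO: low add, produces a carry
//         uint32_t carry = (lo < loX) ? 1 : 0; // carry out of the low 32-bit add
//         uint32_t hi    = hiX + hiY + carry;  // GT_ADD_HI: high add consumes the carry
//
//         return ((uint64_t)hi << 32) | lo;    // reassembled as GT_LONG(lo, hi) would be
//     }
// ---------------------------------------------------------------------------------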
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX DecomposeLongs XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX*/ // // This file contains code to decompose 64-bit LONG operations on 32-bit platforms // into multiple single-register operations so individual register usage and requirements // are explicit for LSRA. The rationale behind this is to avoid adding code complexity // downstream caused by the introduction of handling longs as special cases, // especially in LSRA. // // Long decomposition happens on a statement immediately prior to more general // purpose lowering. // #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #ifndef TARGET_64BIT // DecomposeLongs is only used on 32-bit platforms #include "decomposelongs.h" //------------------------------------------------------------------------ // DecomposeLongs::PrepareForDecomposition: // Do one-time preparation required for LONG decomposition. Namely, // promote long variables to multi-register structs. // // Arguments: // None // // Return Value: // None. // void DecomposeLongs::PrepareForDecomposition() { PromoteLongVars(); } //------------------------------------------------------------------------ // DecomposeLongs::DecomposeBlock: // Do LONG decomposition on all the nodes in the given block. This must // be done before lowering the block, as decomposition can insert // additional nodes. // // Arguments: // block - the block to process // // Return Value: // None. // void DecomposeLongs::DecomposeBlock(BasicBlock* block) { assert(block == m_compiler->compCurBB); // compCurBB must already be set. assert(block->isEmpty() || block->IsLIR()); m_range = &LIR::AsRange(block); DecomposeRangeHelper(); } //------------------------------------------------------------------------ // DecomposeLongs::DecomposeRange: // Do LONG decomposition on all the nodes in the given range. This must // be done before inserting a range of un-decomposed IR into a block // that has already been decomposed. // // Arguments: // compiler - The compiler context. // range - The range to decompose. // // Return Value: // None. // void DecomposeLongs::DecomposeRange(Compiler* compiler, LIR::Range& range) { assert(compiler != nullptr); DecomposeLongs decomposer(compiler); decomposer.m_range = &range; decomposer.DecomposeRangeHelper(); } //------------------------------------------------------------------------ // DecomposeLongs::DecomposeRangeHelper: // Decompiose each node in the current range. // // Decomposition is done as an execution-order walk. Decomposition of // a particular node can create new nodes that need to be further // decomposed at higher levels. That is, decomposition "bubbles up" // through dataflow. // void DecomposeLongs::DecomposeRangeHelper() { assert(m_range != nullptr); GenTree* node = Range().FirstNode(); while (node != nullptr) { node = DecomposeNode(node); } assert(Range().CheckLIR(m_compiler, true)); } //------------------------------------------------------------------------ // DecomposeNode: Decompose long-type trees into lower and upper halves. // // Arguments: // tree - the tree that will, if needed, be decomposed. 
// // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeNode(GenTree* tree) { // Handle the case where we are implicitly using the lower half of a long lclVar. if ((tree->TypeGet() == TYP_INT) && tree->OperIsLocal()) { LclVarDsc* varDsc = m_compiler->lvaGetDesc(tree->AsLclVarCommon()); if (varTypeIsLong(varDsc) && varDsc->lvPromoted) { #ifdef DEBUG if (m_compiler->verbose) { printf("Changing implicit reference to lo half of long lclVar to an explicit reference of its promoted " "half:\n"); m_compiler->gtDispTreeRange(Range(), tree); } #endif // DEBUG unsigned loVarNum = varDsc->lvFieldLclStart; tree->AsLclVarCommon()->SetLclNum(loVarNum); return tree->gtNext; } } if (tree->TypeGet() != TYP_LONG) { return tree->gtNext; } #ifdef DEBUG if (m_compiler->verbose) { printf("Decomposing TYP_LONG tree. BEFORE:\n"); m_compiler->gtDispTreeRange(Range(), tree); } #endif // DEBUG LIR::Use use; if (!Range().TryGetUse(tree, &use)) { use = LIR::Use::GetDummyUse(Range(), tree); } GenTree* nextNode = nullptr; switch (tree->OperGet()) { case GT_LCL_VAR: nextNode = DecomposeLclVar(use); break; case GT_LCL_FLD: nextNode = DecomposeLclFld(use); break; case GT_STORE_LCL_VAR: nextNode = DecomposeStoreLclVar(use); break; case GT_CAST: nextNode = DecomposeCast(use); break; case GT_CNS_LNG: nextNode = DecomposeCnsLng(use); break; case GT_CALL: nextNode = DecomposeCall(use); break; case GT_RETURN: assert(tree->AsOp()->gtOp1->OperGet() == GT_LONG); break; case GT_STOREIND: nextNode = DecomposeStoreInd(use); break; case GT_STORE_LCL_FLD: nextNode = DecomposeStoreLclFld(use); break; case GT_IND: nextNode = DecomposeInd(use); break; case GT_NOT: nextNode = DecomposeNot(use); break; case GT_NEG: nextNode = DecomposeNeg(use); break; // Binary operators. Those that require different computation for upper and lower half are // handled by the use of GetHiOper(). case GT_ADD: case GT_SUB: case GT_OR: case GT_XOR: case GT_AND: nextNode = DecomposeArith(use); break; case GT_MUL: nextNode = DecomposeMul(use); break; case GT_UMOD: nextNode = DecomposeUMod(use); break; case GT_LSH: case GT_RSH: case GT_RSZ: nextNode = DecomposeShift(use); break; case GT_ROL: case GT_ROR: nextNode = DecomposeRotate(use); break; #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: nextNode = DecomposeHWIntrinsic(use); break; #endif // FEATURE_HW_INTRINSICS case GT_LOCKADD: case GT_XORR: case GT_XAND: case GT_XADD: case GT_XCHG: case GT_CMPXCHG: NYI("Interlocked operations on TYP_LONG"); break; default: { JITDUMP("Illegal TYP_LONG node %s in Decomposition.", GenTree::OpName(tree->OperGet())); assert(!"Illegal TYP_LONG node in Decomposition."); break; } } // If we replaced the argument to a GT_FIELD_LIST element with a GT_LONG node, split that field list // element into two elements: one for each half of the GT_LONG. if ((use.Def()->OperGet() == GT_LONG) && !use.IsDummyUse() && (use.User()->OperGet() == GT_FIELD_LIST)) { DecomposeFieldList(use.User()->AsFieldList(), use.Def()->AsOp()); } #ifdef DEBUG if (m_compiler->verbose) { // NOTE: st_lcl_var doesn't dump properly afterwards. printf("Decomposing TYP_LONG tree. AFTER:\n"); m_compiler->gtDispTreeRange(Range(), use.Def()); } #endif // When casting from a decomposed long to a smaller integer we can discard the high part. 
if (m_compiler->opts.OptimizationEnabled() && !use.IsDummyUse() && use.User()->OperIs(GT_CAST) && use.User()->TypeIs(TYP_INT) && use.Def()->OperIs(GT_LONG)) { nextNode = OptimizeCastFromDecomposedLong(use.User()->AsCast(), nextNode); } return nextNode; } //------------------------------------------------------------------------ // FinalizeDecomposition: A helper function to finalize LONG decomposition by // taking the resulting two halves of the decomposition, and tie them together // with a new GT_LONG node that will replace the original node. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // loResult - the decomposed low part // hiResult - the decomposed high part // insertResultAfter - the node that the GT_LONG should be inserted after // // Return Value: // The next node to process. // GenTree* DecomposeLongs::FinalizeDecomposition(LIR::Use& use, GenTree* loResult, GenTree* hiResult, GenTree* insertResultAfter) { assert(use.IsInitialized()); assert(loResult != nullptr); assert(hiResult != nullptr); assert(Range().Contains(loResult)); assert(Range().Contains(hiResult)); GenTree* gtLong = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loResult, hiResult); if (use.IsDummyUse()) { gtLong->SetUnusedValue(); } loResult->ClearUnusedValue(); hiResult->ClearUnusedValue(); Range().InsertAfter(insertResultAfter, gtLong); use.ReplaceWith(gtLong); return gtLong->gtNext; } //------------------------------------------------------------------------ // DecomposeLclVar: Decompose GT_LCL_VAR. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeLclVar(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_LCL_VAR); GenTree* tree = use.Def(); unsigned varNum = tree->AsLclVarCommon()->GetLclNum(); LclVarDsc* varDsc = m_compiler->lvaGetDesc(varNum); GenTree* loResult = tree; loResult->gtType = TYP_INT; GenTree* hiResult = m_compiler->gtNewLclLNode(varNum, TYP_INT); Range().InsertAfter(loResult, hiResult); if (varDsc->lvPromoted) { assert(varDsc->lvFieldCnt == 2); unsigned loVarNum = varDsc->lvFieldLclStart; unsigned hiVarNum = loVarNum + 1; loResult->AsLclVarCommon()->SetLclNum(loVarNum); hiResult->AsLclVarCommon()->SetLclNum(hiVarNum); } else { m_compiler->lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField)); loResult->SetOper(GT_LCL_FLD); loResult->AsLclFld()->SetLclOffs(0); loResult->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField()); hiResult->SetOper(GT_LCL_FLD); hiResult->AsLclFld()->SetLclOffs(4); hiResult->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField()); } return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeLclFld: Decompose GT_LCL_FLD. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. 
// GenTree* DecomposeLongs::DecomposeLclFld(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_LCL_FLD); GenTree* tree = use.Def(); GenTreeLclFld* loResult = tree->AsLclFld(); loResult->gtType = TYP_INT; GenTree* hiResult = m_compiler->gtNewLclFldNode(loResult->GetLclNum(), TYP_INT, loResult->GetLclOffs() + 4); Range().InsertAfter(loResult, hiResult); return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeStoreLclVar: Decompose GT_STORE_LCL_VAR. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeStoreLclVar(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_STORE_LCL_VAR); GenTree* tree = use.Def(); GenTree* rhs = tree->gtGetOp1(); if (rhs->OperIs(GT_CALL) || (rhs->OperIs(GT_MUL_LONG) && (rhs->gtFlags & GTF_MUL_64RSLT) != 0)) { // GT_CALLs are not decomposed, so will not be converted to GT_LONG // GT_STORE_LCL_VAR = GT_CALL are handled in genMultiRegCallStoreToLocal // GT_MULs are not decomposed, so will not be converted to GT_LONG return tree->gtNext; } noway_assert(rhs->OperGet() == GT_LONG); const LclVarDsc* varDsc = m_compiler->lvaGetDesc(tree->AsLclVarCommon()); if (!varDsc->lvPromoted) { // We cannot decompose a st.lclVar that is not promoted because doing so // changes its liveness semantics. For example, consider the following // decomposition of a st.lclVar into two st.lclFlds: // // Before: // // /--* t0 int // +--* t1 int // t2 = * gt_long long // // /--* t2 long // * st.lclVar long V0 // // After: // /--* t0 int // * st.lclFld int V0 [+0] // // /--* t1 int // * st.lclFld int V0 [+4] // // Before decomposition, the `st.lclVar` is a simple def of `V0`. After // decomposition, each `st.lclFld` is a partial def of `V0`. This partial // def is treated as both a use and a def of the appropriate lclVar. This // difference will affect any situation in which the liveness of a variable // at a def matters (e.g. dead store elimination, live-in sets, etc.). As // a result, we leave these stores as-is and generate the decomposed store // in the code generator. // // NOTE: this does extend the lifetime of the low half of the `GT_LONG` // node as compared to the decomposed form. If we start doing more code // motion in the backend, this may cause some CQ issues and some sort of // decomposition could be beneficial. return tree->gtNext; } assert(varDsc->lvFieldCnt == 2); GenTreeOp* value = rhs->AsOp(); Range().Remove(value); const unsigned loVarNum = varDsc->lvFieldLclStart; GenTree* loStore = tree; loStore->AsLclVarCommon()->SetLclNum(loVarNum); loStore->AsOp()->gtOp1 = value->gtOp1; loStore->gtType = TYP_INT; const unsigned hiVarNum = loVarNum + 1; GenTree* hiStore = m_compiler->gtNewLclLNode(hiVarNum, TYP_INT); hiStore->SetOper(GT_STORE_LCL_VAR); hiStore->AsOp()->gtOp1 = value->gtOp2; hiStore->gtFlags |= GTF_VAR_DEF; Range().InsertAfter(tree, hiStore); return hiStore->gtNext; } //------------------------------------------------------------------------ // DecomposeStoreLclFld: Decompose GT_STORE_LCL_FLD. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. 
// GenTree* DecomposeLongs::DecomposeStoreLclFld(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_STORE_LCL_FLD); GenTreeLclFld* store = use.Def()->AsLclFld(); GenTreeOp* value = store->gtOp1->AsOp(); assert(value->OperGet() == GT_LONG); Range().Remove(value); // The original store node will be repurposed to store the low half of the GT_LONG. GenTreeLclFld* loStore = store; loStore->gtOp1 = value->gtOp1; loStore->gtType = TYP_INT; loStore->gtFlags |= GTF_VAR_USEASG; // Create the store for the upper half of the GT_LONG and insert it after the low store. GenTreeLclFld* hiStore = m_compiler->gtNewLclFldNode(loStore->GetLclNum(), TYP_INT, loStore->GetLclOffs() + 4); hiStore->SetOper(GT_STORE_LCL_FLD); hiStore->gtOp1 = value->gtOp2; hiStore->gtFlags |= (GTF_VAR_DEF | GTF_VAR_USEASG); Range().InsertAfter(loStore, hiStore); return hiStore->gtNext; } //------------------------------------------------------------------------ // DecomposeCast: Decompose GT_CAST. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeCast(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_CAST); GenTree* cast = use.Def()->AsCast(); GenTree* loResult = nullptr; GenTree* hiResult = nullptr; var_types srcType = cast->CastFromType(); var_types dstType = cast->CastToType(); if ((cast->gtFlags & GTF_UNSIGNED) != 0) { srcType = varTypeToUnsigned(srcType); } bool skipDecomposition = false; if (varTypeIsLong(srcType)) { if (cast->gtOverflow() && (varTypeIsUnsigned(srcType) != varTypeIsUnsigned(dstType))) { GenTree* srcOp = cast->gtGetOp1(); noway_assert(srcOp->OperGet() == GT_LONG); GenTree* loSrcOp = srcOp->gtGetOp1(); GenTree* hiSrcOp = srcOp->gtGetOp2(); // // When casting between long types an overflow check is needed only if the types // have different signedness. In both cases (long->ulong and ulong->long) we only // need to check if the high part is negative or not. Use the existing cast node // to perform a int->uint cast of the high part to take advantage of the overflow // check provided by codegen. // const bool signExtend = (cast->gtFlags & GTF_UNSIGNED) == 0; loResult = EnsureIntSized(loSrcOp, signExtend); hiResult = cast; hiResult->gtType = TYP_INT; hiResult->AsCast()->gtCastType = TYP_UINT; hiResult->gtFlags &= ~GTF_UNSIGNED; hiResult->AsOp()->gtOp1 = hiSrcOp; Range().Remove(srcOp); } else { NYI("Unimplemented long->long no-op cast decomposition"); } } else if (varTypeIsIntegralOrI(srcType)) { if (cast->gtOverflow() && !varTypeIsUnsigned(srcType) && varTypeIsUnsigned(dstType)) { // // An overflow check is needed only when casting from a signed type to ulong. // Change the cast type to uint to take advantage of the overflow check provided // by codegen and then zero extend the resulting uint to ulong. // loResult = cast; loResult->AsCast()->gtCastType = TYP_UINT; loResult->gtType = TYP_INT; hiResult = m_compiler->gtNewZeroConNode(TYP_INT); Range().InsertAfter(loResult, hiResult); } else { if (!use.IsDummyUse() && (use.User()->OperGet() == GT_MUL)) { // // This int->long cast is used by a GT_MUL that will be transformed by DecomposeMul into a // GT_MUL_LONG and as a result the high operand produced by the cast will become dead. // Skip cast decomposition so DecomposeMul doesn't need to bother with dead code removal, // especially in the case of sign extending casts that also introduce new lclvars. 
// assert(use.User()->Is64RsltMul()); skipDecomposition = true; } else if (varTypeIsUnsigned(srcType)) { const bool signExtend = (cast->gtFlags & GTF_UNSIGNED) == 0; loResult = EnsureIntSized(cast->gtGetOp1(), signExtend); hiResult = m_compiler->gtNewZeroConNode(TYP_INT); Range().InsertAfter(cast, hiResult); Range().Remove(cast); } else { LIR::Use src(Range(), &(cast->AsOp()->gtOp1), cast); unsigned lclNum = src.ReplaceWithLclVar(m_compiler); loResult = src.Def(); GenTree* loCopy = m_compiler->gtNewLclvNode(lclNum, TYP_INT); GenTree* shiftBy = m_compiler->gtNewIconNode(31, TYP_INT); hiResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, loCopy, shiftBy); Range().InsertAfter(cast, loCopy, shiftBy, hiResult); Range().Remove(cast); } } } else { NYI("Unimplemented cast decomposition"); } if (skipDecomposition) { return cast->gtNext; } return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeCnsLng: Decompose GT_CNS_LNG. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeCnsLng(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_CNS_LNG); GenTree* tree = use.Def(); INT32 loVal = tree->AsLngCon()->LoVal(); INT32 hiVal = tree->AsLngCon()->HiVal(); GenTree* loResult = tree; loResult->BashToConst(loVal); GenTree* hiResult = m_compiler->gtNewIconNode(hiVal); Range().InsertAfter(loResult, hiResult); return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeFieldList: Decompose GT_FIELD_LIST. // // Arguments: // fieldList - the GT_FIELD_LIST node that uses the given GT_LONG node. // longNode - the node to decompose // // Return Value: // The next node to process. // // Notes: // Split a LONG field list element into two elements: one for each half of the GT_LONG. // GenTree* DecomposeLongs::DecomposeFieldList(GenTreeFieldList* fieldList, GenTreeOp* longNode) { assert(longNode->OperGet() == GT_LONG); GenTreeFieldList::Use* loUse = nullptr; for (GenTreeFieldList::Use& use : fieldList->Uses()) { if (use.GetNode() == longNode) { loUse = &use; break; } } assert(loUse != nullptr); Range().Remove(longNode); loUse->SetNode(longNode->gtGetOp1()); loUse->SetType(TYP_INT); fieldList->InsertFieldLIR(m_compiler, loUse, longNode->gtGetOp2(), loUse->GetOffset() + 4, TYP_INT); return fieldList->gtNext; } //------------------------------------------------------------------------ // DecomposeCall: Decompose GT_CALL. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeCall(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_CALL); // We only need to force var = call() if the call's result is used. return StoreNodeToVar(use); } //------------------------------------------------------------------------ // DecomposeStoreInd: Decompose GT_STOREIND. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. 
// GenTree* DecomposeLongs::DecomposeStoreInd(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_STOREIND); GenTree* tree = use.Def(); assert(tree->AsOp()->gtOp2->OperGet() == GT_LONG); // Example input (address expression omitted): // // t51 = const int 0x37C05E7D // t154 = const int 0x2A0A3C80 // / --* t51 int // + --* t154 int // t155 = *gt_long long // / --* t52 byref // + --* t155 long // * storeIndir long GenTree* gtLong = tree->AsOp()->gtOp2; // Save address to a temp. It is used in storeIndLow and storeIndHigh trees. LIR::Use address(Range(), &tree->AsOp()->gtOp1, tree); address.ReplaceWithLclVar(m_compiler); JITDUMP("[DecomposeStoreInd]: Saving address tree to a temp var:\n"); DISPTREERANGE(Range(), address.Def()); if (!gtLong->AsOp()->gtOp1->OperIsLeaf()) { LIR::Use op1(Range(), &gtLong->AsOp()->gtOp1, gtLong); op1.ReplaceWithLclVar(m_compiler); JITDUMP("[DecomposeStoreInd]: Saving low data tree to a temp var:\n"); DISPTREERANGE(Range(), op1.Def()); } if (!gtLong->AsOp()->gtOp2->OperIsLeaf()) { LIR::Use op2(Range(), &gtLong->AsOp()->gtOp2, gtLong); op2.ReplaceWithLclVar(m_compiler); JITDUMP("[DecomposeStoreInd]: Saving high data tree to a temp var:\n"); DISPTREERANGE(Range(), op2.Def()); } GenTree* addrBase = tree->AsOp()->gtOp1; GenTree* dataHigh = gtLong->AsOp()->gtOp2; GenTree* dataLow = gtLong->AsOp()->gtOp1; GenTree* storeIndLow = tree; Range().Remove(gtLong); Range().Remove(dataHigh); storeIndLow->AsOp()->gtOp2 = dataLow; storeIndLow->gtType = TYP_INT; GenTree* addrBaseHigh = new (m_compiler, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum()); GenTree* addrHigh = new (m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT)); GenTree* storeIndHigh = new (m_compiler, GT_STOREIND) GenTreeStoreInd(TYP_INT, addrHigh, dataHigh); storeIndHigh->gtFlags = (storeIndLow->gtFlags & (GTF_ALL_EFFECT | GTF_LIVENESS_MASK)); Range().InsertAfter(storeIndLow, dataHigh, addrBaseHigh, addrHigh, storeIndHigh); return storeIndHigh; // Example final output: // // /--* t52 byref // * st.lclVar byref V07 rat0 // t158 = lclVar byref V07 rat0 // t51 = const int 0x37C05E7D // /--* t158 byref // +--* t51 int // * storeIndir int // t154 = const int 0x2A0A3C80 // t159 = lclVar byref V07 rat0 // /--* t159 byref // t160 = * lea(b + 4) ref // /--* t154 int // +--* t160 ref // * storeIndir int } //------------------------------------------------------------------------ // DecomposeInd: Decompose GT_IND. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeInd(LIR::Use& use) { GenTree* indLow = use.Def(); LIR::Use address(Range(), &indLow->AsOp()->gtOp1, indLow); address.ReplaceWithLclVar(m_compiler); JITDUMP("[DecomposeInd]: Saving addr tree to a temp var:\n"); DISPTREERANGE(Range(), address.Def()); // Change the type of lower ind. 
indLow->gtType = TYP_INT; // Create tree of ind(addr+4) GenTree* addrBase = indLow->gtGetOp1(); GenTree* addrBaseHigh = new (m_compiler, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum()); GenTree* addrHigh = new (m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT)); GenTree* indHigh = new (m_compiler, GT_IND) GenTreeIndir(GT_IND, TYP_INT, addrHigh, nullptr); indHigh->gtFlags |= (indLow->gtFlags & (GTF_GLOB_REF | GTF_EXCEPT | GTF_IND_FLAGS)); Range().InsertAfter(indLow, addrBaseHigh, addrHigh, indHigh); return FinalizeDecomposition(use, indLow, indHigh, indHigh); } //------------------------------------------------------------------------ // DecomposeNot: Decompose GT_NOT. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeNot(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_NOT); GenTree* tree = use.Def(); GenTree* gtLong = tree->gtGetOp1(); noway_assert(gtLong->OperGet() == GT_LONG); GenTree* loOp1 = gtLong->gtGetOp1(); GenTree* hiOp1 = gtLong->gtGetOp2(); Range().Remove(gtLong); GenTree* loResult = tree; loResult->gtType = TYP_INT; loResult->AsOp()->gtOp1 = loOp1; GenTree* hiResult = new (m_compiler, GT_NOT) GenTreeOp(GT_NOT, TYP_INT, hiOp1, nullptr); Range().InsertAfter(loResult, hiResult); return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeNeg: Decompose GT_NEG. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeNeg(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_NEG); GenTree* tree = use.Def(); GenTree* gtLong = tree->gtGetOp1(); noway_assert(gtLong->OperGet() == GT_LONG); GenTree* loOp1 = gtLong->gtGetOp1(); GenTree* hiOp1 = gtLong->gtGetOp2(); Range().Remove(gtLong); GenTree* loResult = tree; loResult->gtType = TYP_INT; loResult->AsOp()->gtOp1 = loOp1; GenTree* zero = m_compiler->gtNewZeroConNode(TYP_INT); #if defined(TARGET_X86) GenTree* hiAdjust = m_compiler->gtNewOperNode(GT_ADD_HI, TYP_INT, hiOp1, zero); GenTree* hiResult = m_compiler->gtNewOperNode(GT_NEG, TYP_INT, hiAdjust); Range().InsertAfter(loResult, zero, hiAdjust, hiResult); loResult->gtFlags |= GTF_SET_FLAGS; hiAdjust->gtFlags |= GTF_USE_FLAGS; #elif defined(TARGET_ARM) // We tend to use "movs" to load zero to a register, and that sets the flags, so put the // zero before the loResult, which is setting the flags needed by GT_SUB_HI. GenTree* hiResult = m_compiler->gtNewOperNode(GT_SUB_HI, TYP_INT, zero, hiOp1); Range().InsertBefore(loResult, zero); Range().InsertAfter(loResult, hiResult); loResult->gtFlags |= GTF_SET_FLAGS; hiResult->gtFlags |= GTF_USE_FLAGS; #endif return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeArith: Decompose GT_ADD, GT_SUB, GT_OR, GT_XOR, GT_AND. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. 
// GenTree* DecomposeLongs::DecomposeArith(LIR::Use& use) { assert(use.IsInitialized()); GenTree* tree = use.Def(); genTreeOps oper = tree->OperGet(); assert((oper == GT_ADD) || (oper == GT_SUB) || (oper == GT_OR) || (oper == GT_XOR) || (oper == GT_AND)); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); // Both operands must have already been decomposed into GT_LONG operators. noway_assert((op1->OperGet() == GT_LONG) && (op2->OperGet() == GT_LONG)); // Capture the lo and hi halves of op1 and op2. GenTree* loOp1 = op1->gtGetOp1(); GenTree* hiOp1 = op1->gtGetOp2(); GenTree* loOp2 = op2->gtGetOp1(); GenTree* hiOp2 = op2->gtGetOp2(); // Now, remove op1 and op2 from the node list. Range().Remove(op1); Range().Remove(op2); // We will reuse "tree" for the loResult, which will now be of TYP_INT, and its operands // will be the lo halves of op1 from above. GenTree* loResult = tree; loResult->SetOper(GetLoOper(oper)); loResult->gtType = TYP_INT; loResult->AsOp()->gtOp1 = loOp1; loResult->AsOp()->gtOp2 = loOp2; GenTree* hiResult = new (m_compiler, oper) GenTreeOp(GetHiOper(oper), TYP_INT, hiOp1, hiOp2); Range().InsertAfter(loResult, hiResult); if ((oper == GT_ADD) || (oper == GT_SUB)) { loResult->gtFlags |= GTF_SET_FLAGS; hiResult->gtFlags |= GTF_USE_FLAGS; if ((loResult->gtFlags & GTF_OVERFLOW) != 0) { hiResult->gtFlags |= GTF_OVERFLOW | GTF_EXCEPT; loResult->gtFlags &= ~(GTF_OVERFLOW | GTF_EXCEPT); } if (loResult->gtFlags & GTF_UNSIGNED) { hiResult->gtFlags |= GTF_UNSIGNED; } } return FinalizeDecomposition(use, loResult, hiResult, hiResult); } //------------------------------------------------------------------------ // DecomposeShift: Decompose GT_LSH, GT_RSH, GT_RSZ. For shift nodes being shifted // by a constant int, we can inspect the shift amount and decompose to the appropriate // node types, generating a shl/shld pattern for GT_LSH, a shrd/shr pattern for GT_RSZ, // and a shrd/sar pattern for GT_SHR for most shift amounts. Shifting by 0, >= 32 and // >= 64 are special cased to produce better code patterns. // // For all other shift nodes, we need to use the shift helper functions, so we here convert // the shift into a helper call by pulling its arguments out of linear order and making // them the args to a call, then replacing the original node with the new call. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use) { assert(use.IsInitialized()); GenTree* shift = use.Def(); GenTree* gtLong = shift->gtGetOp1(); GenTree* loOp1 = gtLong->gtGetOp1(); GenTree* hiOp1 = gtLong->gtGetOp2(); GenTree* shiftByOp = shift->gtGetOp2(); genTreeOps oper = shift->OperGet(); genTreeOps shiftByOper = shiftByOp->OperGet(); // tLo = ... // ... // tHi = ... // ... // tLong = long tLo, tHi // ... // tShiftAmount = ... // ... // tShift = shift tLong, tShiftAmount assert((oper == GT_LSH) || (oper == GT_RSH) || (oper == GT_RSZ)); // If we are shifting by a constant int, we do not want to use a helper, instead, we decompose. if (shiftByOper == GT_CNS_INT) { // Reduce count modulo 64 to match behavior found in the shift helpers, // Compiler::gtFoldExpr and ValueNumStore::EvalOpIntegral. unsigned int count = shiftByOp->AsIntCon()->gtIconVal & 0x3F; Range().Remove(shiftByOp); if (count == 0) { GenTree* next = shift->gtNext; // Remove shift and don't do anything else. 
if (shift->IsUnusedValue()) { gtLong->SetUnusedValue(); } Range().Remove(shift); use.ReplaceWith(gtLong); return next; } GenTree* loResult; GenTree* hiResult; GenTree* insertAfter; switch (oper) { case GT_LSH: { if (count < 32) { // For shifts of < 32 bits, we transform the code to: // // tLo = ... // st.lclVar vLo, tLo // ... // tHi = ... // ... // tShiftLo = lsh vLo, tShiftAmountLo // tShitHiLong = long vLo, tHi // tShiftHi = lsh_hi tShiftHiLong, tShiftAmountHi // // This will produce: // // reg1 = lo // shl lo, shift // shld hi, reg1, shift loOp1 = RepresentOpAsLocalVar(loOp1, gtLong, &gtLong->AsOp()->gtOp1); unsigned loOp1LclNum = loOp1->AsLclVarCommon()->GetLclNum(); Range().Remove(loOp1); GenTree* shiftByHi = m_compiler->gtNewIconNode(count, TYP_INT); GenTree* shiftByLo = m_compiler->gtNewIconNode(count, TYP_INT); loResult = m_compiler->gtNewOperNode(GT_LSH, TYP_INT, loOp1, shiftByLo); // Create a GT_LONG that contains loCopy and hiOp1. This will be used in codegen to // generate the shld instruction GenTree* loCopy = m_compiler->gtNewLclvNode(loOp1LclNum, TYP_INT); GenTree* hiOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loCopy, hiOp1); hiResult = m_compiler->gtNewOperNode(GT_LSH_HI, TYP_INT, hiOp, shiftByHi); Range().InsertBefore(shift, loOp1, shiftByLo, loResult); Range().InsertBefore(shift, loCopy, hiOp, shiftByHi, hiResult); insertAfter = hiResult; } else { assert(count >= 32 && count < 64); // Since we're left shifting at least 32 bits, we can remove the hi part of the shifted value iff // it has no side effects. // // TODO-CQ: we could go perform this removal transitively (i.e. iteratively remove everything that // feeds the hi operand while there are no side effects) if ((hiOp1->gtFlags & GTF_ALL_EFFECT) == 0) { Range().Remove(hiOp1, true); } else { hiOp1->SetUnusedValue(); } if (count == 32) { // Move loOp1 into hiResult (shift of 32 bits is just a mov of lo to hi) // We need to make sure that we save lo to a temp variable so that we don't overwrite lo // before saving it to hi in the case that we are doing an inplace shift. I.e.: // x = x << 32 LIR::Use loOp1Use(Range(), &gtLong->AsOp()->gtOp1, gtLong); loOp1Use.ReplaceWithLclVar(m_compiler); hiResult = loOp1Use.Def(); } else { assert(count > 32 && count < 64); // Move loOp1 into hiResult, do a GT_LSH with count - 32. // We will compute hiResult before loResult in this case, so we don't need to store lo to a // temp GenTree* shiftBy = m_compiler->gtNewIconNode(count - 32, TYP_INT); hiResult = m_compiler->gtNewOperNode(oper, TYP_INT, loOp1, shiftBy); Range().InsertBefore(shift, shiftBy, hiResult); } // Zero out loResult (shift of >= 32 bits shifts all lo bits to hiResult) loResult = m_compiler->gtNewZeroConNode(TYP_INT); Range().InsertBefore(shift, loResult); insertAfter = loResult; } } break; case GT_RSZ: { if (count < 32) { // Hi is a GT_RSZ, lo is a GT_RSH_LO. Will produce: // reg1 = hi // shrd lo, reg1, shift // shr hi, shift hiOp1 = RepresentOpAsLocalVar(hiOp1, gtLong, &gtLong->AsOp()->gtOp2); unsigned hiOp1LclNum = hiOp1->AsLclVarCommon()->GetLclNum(); GenTree* hiCopy = m_compiler->gtNewLclvNode(hiOp1LclNum, TYP_INT); GenTree* shiftByHi = m_compiler->gtNewIconNode(count, TYP_INT); GenTree* shiftByLo = m_compiler->gtNewIconNode(count, TYP_INT); hiResult = m_compiler->gtNewOperNode(GT_RSZ, TYP_INT, hiOp1, shiftByHi); // Create a GT_LONG that contains loOp1 and hiCopy. 
This will be used in codegen to // generate the shrd instruction GenTree* loOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loOp1, hiCopy); loResult = m_compiler->gtNewOperNode(GT_RSH_LO, TYP_INT, loOp, shiftByLo); Range().InsertBefore(shift, hiCopy, loOp); Range().InsertBefore(shift, shiftByLo, loResult); Range().InsertBefore(shift, shiftByHi, hiResult); } else { assert(count >= 32 && count < 64); // Since we're right shifting at least 32 bits, we can remove the lo part of the shifted value iff // it has no side effects. // // TODO-CQ: we could go perform this removal transitively (i.e. iteratively remove everything that // feeds the lo operand while there are no side effects) if ((loOp1->gtFlags & (GTF_ALL_EFFECT | GTF_SET_FLAGS)) == 0) { Range().Remove(loOp1, true); } else { loOp1->SetUnusedValue(); } if (count == 32) { // Move hiOp1 into loResult. loResult = hiOp1; } else { assert(count > 32 && count < 64); // Move hiOp1 into loResult, do a GT_RSZ with count - 32. GenTree* shiftBy = m_compiler->gtNewIconNode(count - 32, TYP_INT); loResult = m_compiler->gtNewOperNode(oper, TYP_INT, hiOp1, shiftBy); Range().InsertBefore(shift, shiftBy, loResult); } // Zero out hi hiResult = m_compiler->gtNewZeroConNode(TYP_INT); Range().InsertBefore(shift, hiResult); } insertAfter = hiResult; } break; case GT_RSH: { hiOp1 = RepresentOpAsLocalVar(hiOp1, gtLong, &gtLong->AsOp()->gtOp2); unsigned hiOp1LclNum = hiOp1->AsLclVarCommon()->GetLclNum(); GenTree* hiCopy = m_compiler->gtNewLclvNode(hiOp1LclNum, TYP_INT); Range().Remove(hiOp1); if (count < 32) { // Hi is a GT_RSH, lo is a GT_RSH_LO. Will produce: // reg1 = hi // shrd lo, reg1, shift // sar hi, shift GenTree* shiftByHi = m_compiler->gtNewIconNode(count, TYP_INT); GenTree* shiftByLo = m_compiler->gtNewIconNode(count, TYP_INT); hiResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, hiOp1, shiftByHi); // Create a GT_LONG that contains loOp1 and hiCopy. This will be used in codegen to // generate the shrd instruction GenTree* loOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loOp1, hiCopy); loResult = m_compiler->gtNewOperNode(GT_RSH_LO, TYP_INT, loOp, shiftByLo); Range().InsertBefore(shift, hiCopy, loOp); Range().InsertBefore(shift, shiftByLo, loResult); Range().InsertBefore(shift, shiftByHi, hiOp1, hiResult); } else { assert(count >= 32 && count < 64); // Since we're right shifting at least 32 bits, we can remove the lo part of the shifted value iff // it has no side effects. // // TODO-CQ: we could go perform this removal transitively (i.e. iteratively remove everything that // feeds the lo operand while there are no side effects) if ((loOp1->gtFlags & (GTF_ALL_EFFECT | GTF_SET_FLAGS)) == 0) { Range().Remove(loOp1, true); } else { loOp1->SetUnusedValue(); } if (count == 32) { // Move hiOp1 into loResult. loResult = hiOp1; Range().InsertBefore(shift, loResult); } else { assert(count > 32 && count < 64); // Move hiOp1 into loResult, do a GT_RSH with count - 32. 
GenTree* shiftBy = m_compiler->gtNewIconNode(count - 32, TYP_INT); loResult = m_compiler->gtNewOperNode(oper, TYP_INT, hiOp1, shiftBy); Range().InsertBefore(shift, hiOp1, shiftBy, loResult); } // Propagate sign bit in hiResult GenTree* shiftBy = m_compiler->gtNewIconNode(31, TYP_INT); hiResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, hiCopy, shiftBy); Range().InsertBefore(shift, shiftBy, hiCopy, hiResult); } insertAfter = hiResult; } break; default: unreached(); } // Remove shift from Range Range().Remove(gtLong); Range().Remove(shift); return FinalizeDecomposition(use, loResult, hiResult, insertAfter); } else { // Because calls must be created as HIR and lowered to LIR, we need to dump // any LIR temps into lclVars before using them as arguments. shiftByOp = RepresentOpAsLocalVar(shiftByOp, shift, &shift->AsOp()->gtOp2); loOp1 = RepresentOpAsLocalVar(loOp1, gtLong, &gtLong->AsOp()->gtOp1); hiOp1 = RepresentOpAsLocalVar(hiOp1, gtLong, &gtLong->AsOp()->gtOp2); Range().Remove(shiftByOp); Range().Remove(gtLong); Range().Remove(loOp1); Range().Remove(hiOp1); unsigned helper; switch (oper) { case GT_LSH: helper = CORINFO_HELP_LLSH; break; case GT_RSH: helper = CORINFO_HELP_LRSH; break; case GT_RSZ: helper = CORINFO_HELP_LRSZ; break; default: unreached(); } GenTreeCall::Use* argList = m_compiler->gtNewCallArgs(loOp1, hiOp1, shiftByOp); GenTreeCall* call = m_compiler->gtNewHelperCallNode(helper, TYP_LONG, argList); call->gtFlags |= shift->gtFlags & GTF_ALL_EFFECT; if (shift->IsUnusedValue()) { call->SetUnusedValue(); } call = m_compiler->fgMorphArgs(call); Range().InsertAfter(shift, LIR::SeqTree(m_compiler, call)); Range().Remove(shift); use.ReplaceWith(call); return call; } } //------------------------------------------------------------------------ // DecomposeRotate: Decompose GT_ROL and GT_ROR with constant shift amounts. We can // inspect the rotate amount and decompose to the appropriate node types, generating // a shld/shld pattern for GT_ROL, a shrd/shrd pattern for GT_ROR, for most rotate // amounts. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeRotate(LIR::Use& use) { GenTree* tree = use.Def(); GenTree* gtLong = tree->gtGetOp1(); GenTree* rotateByOp = tree->gtGetOp2(); genTreeOps oper = tree->OperGet(); assert((oper == GT_ROL) || (oper == GT_ROR)); assert(rotateByOp->IsCnsIntOrI()); // For longs, we need to change rols into two GT_LSH_HIs and rors into two GT_RSH_LOs // so we will get: // // shld lo, hi, rotateAmount // shld hi, loCopy, rotateAmount // // or: // // shrd lo, hi, rotateAmount // shrd hi, loCopy, rotateAmount if (oper == GT_ROL) { oper = GT_LSH_HI; } else { oper = GT_RSH_LO; } unsigned count = (unsigned)rotateByOp->AsIntCon()->gtIconVal; Range().Remove(rotateByOp); // Make sure the rotate amount is between 0 and 63. assert((count < 64) && (count != 0)); GenTree* loResult; GenTree* hiResult; if (count == 32) { // If the rotate amount is 32, then swap hi and lo LIR::Use loOp1Use(Range(), &gtLong->AsOp()->gtOp1, gtLong); loOp1Use.ReplaceWithLclVar(m_compiler); LIR::Use hiOp1Use(Range(), &gtLong->AsOp()->gtOp2, gtLong); hiOp1Use.ReplaceWithLclVar(m_compiler); hiResult = loOp1Use.Def(); loResult = hiOp1Use.Def(); gtLong->AsOp()->gtOp1 = loResult; gtLong->AsOp()->gtOp2 = hiResult; if (tree->IsUnusedValue()) { gtLong->SetUnusedValue(); } GenTree* next = tree->gtNext; // Remove tree and don't do anything else. 
Range().Remove(tree); use.ReplaceWith(gtLong); return next; } else { GenTree* loOp1; GenTree* hiOp1; if (count > 32) { // If count > 32, we swap hi and lo, and subtract 32 from count hiOp1 = gtLong->gtGetOp1(); loOp1 = gtLong->gtGetOp2(); loOp1 = RepresentOpAsLocalVar(loOp1, gtLong, &gtLong->AsOp()->gtOp2); hiOp1 = RepresentOpAsLocalVar(hiOp1, gtLong, &gtLong->AsOp()->gtOp1); count -= 32; } else { loOp1 = gtLong->gtGetOp1(); hiOp1 = gtLong->gtGetOp2(); loOp1 = RepresentOpAsLocalVar(loOp1, gtLong, &gtLong->AsOp()->gtOp1); hiOp1 = RepresentOpAsLocalVar(hiOp1, gtLong, &gtLong->AsOp()->gtOp2); } Range().Remove(gtLong); unsigned loOp1LclNum = loOp1->AsLclVarCommon()->GetLclNum(); unsigned hiOp1LclNum = hiOp1->AsLclVarCommon()->GetLclNum(); Range().Remove(loOp1); Range().Remove(hiOp1); GenTree* rotateByHi = m_compiler->gtNewIconNode(count, TYP_INT); GenTree* rotateByLo = m_compiler->gtNewIconNode(count, TYP_INT); // Create a GT_LONG that contains loOp1 and hiCopy. This will be used in codegen to // generate the shld instruction GenTree* hiCopy = m_compiler->gtNewLclvNode(hiOp1LclNum, TYP_INT); GenTree* loOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, hiCopy, loOp1); loResult = m_compiler->gtNewOperNode(oper, TYP_INT, loOp, rotateByLo); // Create a GT_LONG that contains loCopy and hiOp1. This will be used in codegen to // generate the shld instruction GenTree* loCopy = m_compiler->gtNewLclvNode(loOp1LclNum, TYP_INT); GenTree* hiOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loCopy, hiOp1); hiResult = m_compiler->gtNewOperNode(oper, TYP_INT, hiOp, rotateByHi); Range().InsertBefore(tree, hiCopy, loOp1, loOp); Range().InsertBefore(tree, rotateByLo, loResult); Range().InsertBefore(tree, loCopy, hiOp1, hiOp); Range().InsertBefore(tree, rotateByHi, hiResult); Range().Remove(tree); return FinalizeDecomposition(use, loResult, hiResult, hiResult); } } //------------------------------------------------------------------------ // DecomposeMul: Decompose GT_MUL. The only GT_MULs that make it to decompose are // those with the GTF_MUL_64RSLT flag set. These muls result in a mul instruction that // returns its result in two registers like GT_CALLs do. Additionally, these muls are // guaranteed to be in the form long = (long)int * (long)int. Therefore, to decompose // these nodes, we convert them into GT_MUL_LONGs, undo the cast from int to long by // stripping out the lo ops, and force them into the form var = mul, as we do for // GT_CALLs. In codegen, we then produce a mul instruction that produces the result // in edx:eax on x86 or in any two chosen by RA registers on arm32, and store those // registers on the stack in genStoreLongLclVar. // // All other GT_MULs have been converted to helper calls in morph.cpp // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeMul(LIR::Use& use) { assert(use.IsInitialized()); GenTree* tree = use.Def(); assert(tree->OperIs(GT_MUL)); assert(tree->Is64RsltMul()); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); assert(op1->TypeIs(TYP_LONG) && op2->TypeIs(TYP_LONG)); // We expect the first operand to be an int->long cast. // DecomposeCast specifically ignores such casts when they are used by GT_MULs. assert(op1->OperIs(GT_CAST)); // The second operand can be a cast or a constant. 
if (!op2->OperIs(GT_CAST)) { assert(op2->OperIs(GT_LONG)); assert(op2->gtGetOp1()->IsIntegralConst()); assert(op2->gtGetOp2()->IsIntegralConst()); Range().Remove(op2->gtGetOp2()); } Range().Remove(op1); Range().Remove(op2); tree->AsOp()->gtOp1 = op1->gtGetOp1(); tree->AsOp()->gtOp2 = op2->gtGetOp1(); tree->SetOper(GT_MUL_LONG); return StoreNodeToVar(use); } //------------------------------------------------------------------------ // DecomposeUMod: Decompose GT_UMOD. The only GT_UMODs that make it to decompose // are guaranteed to be an unsigned long mod with op2 which is a cast to long from // a constant int whose value is between 2 and 0x3fffffff. All other GT_UMODs are // morphed into helper calls. These GT_UMODs will actually return an int value in // RDX. In decompose, we make the lo operation a TYP_INT GT_UMOD, with op2 as the // original lo half and op1 as a GT_LONG. We make the hi part 0, so we end up with: // // GT_UMOD[TYP_INT] ( GT_LONG [TYP_LONG] (loOp1, hiOp1), loOp2 [TYP_INT] ) // // With the expectation that we will generate: // // EDX = hiOp1 // EAX = loOp1 // reg = loOp2 // idiv reg // EDX is the remainder, and result of GT_UMOD // mov hiReg = 0 // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeUMod(LIR::Use& use) { assert(use.IsInitialized()); GenTree* tree = use.Def(); genTreeOps oper = tree->OperGet(); assert(oper == GT_UMOD); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); assert(op1->OperGet() == GT_LONG); assert(op2->OperGet() == GT_LONG); GenTree* loOp2 = op2->gtGetOp1(); GenTree* hiOp2 = op2->gtGetOp2(); assert(loOp2->OperGet() == GT_CNS_INT); assert(hiOp2->OperGet() == GT_CNS_INT); assert((loOp2->AsIntCon()->gtIconVal >= 2) && (loOp2->AsIntCon()->gtIconVal <= 0x3fffffff)); assert(hiOp2->AsIntCon()->gtIconVal == 0); // Get rid of op2's hi part. We don't need it. Range().Remove(hiOp2); Range().Remove(op2); // Lo part is the GT_UMOD GenTree* loResult = tree; loResult->AsOp()->gtOp2 = loOp2; loResult->gtType = TYP_INT; // Set the high part to 0 GenTree* hiResult = m_compiler->gtNewZeroConNode(TYP_INT); Range().InsertAfter(loResult, hiResult); return FinalizeDecomposition(use, loResult, hiResult, hiResult); } #ifdef FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // DecomposeHWIntrinsic: Decompose GT_HWINTRINSIC. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeHWIntrinsic(LIR::Use& use) { GenTree* tree = use.Def(); assert(tree->OperIs(GT_HWINTRINSIC)); GenTreeHWIntrinsic* hwintrinsicTree = tree->AsHWIntrinsic(); switch (hwintrinsicTree->GetHWIntrinsicId()) { case NI_Vector128_GetElement: case NI_Vector256_GetElement: return DecomposeHWIntrinsicGetElement(use, hwintrinsicTree); default: noway_assert(!"unexpected GT_HWINTRINSIC node in long decomposition"); break; } return nullptr; } //------------------------------------------------------------------------ // DecomposeHWIntrinsicGetElement: Decompose GT_HWINTRINSIC -- NI_Vector*_GetElement. // // Decompose a get[i] node on Vector*<long>. 
For: // // GT_HWINTRINSIC{GetElement}[long](simd_var, index) // // create: // // tmp_simd_var = simd_var // tmp_index = index // loResult = GT_HWINTRINSIC{GetElement}[int](tmp_simd_var, tmp_index * 2) // hiResult = GT_HWINTRINSIC{GetElement}[int](tmp_simd_var, tmp_index * 2 + 1) // return: GT_LONG(loResult, hiResult) // // This isn't optimal codegen, since NI_Vector*_GetElement sometimes requires // temps that could be shared, for example. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // node - the hwintrinsic node to decompose // // Return Value: // The next node to process. // GenTree* DecomposeLongs::DecomposeHWIntrinsicGetElement(LIR::Use& use, GenTreeHWIntrinsic* node) { assert(node == use.Def()); assert(varTypeIsLong(node)); assert((node->GetHWIntrinsicId() == NI_Vector128_GetElement) || (node->GetHWIntrinsicId() == NI_Vector256_GetElement)); GenTree* op1 = node->Op(1); GenTree* op2 = node->Op(2); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); assert(varTypeIsLong(simdBaseType)); assert(varTypeIsSIMD(op1->TypeGet())); assert(op2->TypeIs(TYP_INT)); bool indexIsConst = op2->OperIsConst(); ssize_t index = 0; if (indexIsConst) { index = op2->AsIntCon()->IconValue(); } GenTree* simdTmpVar = RepresentOpAsLocalVar(op1, node, &node->Op(1)); unsigned simdTmpVarNum = simdTmpVar->AsLclVarCommon()->GetLclNum(); JITDUMP("[DecomposeHWIntrinsicGetElement]: Saving op1 tree to a temp var:\n"); DISPTREERANGE(Range(), simdTmpVar); Range().Remove(simdTmpVar); op1 = node->Op(1); GenTree* indexTmpVar = nullptr; unsigned indexTmpVarNum = 0; if (!indexIsConst) { indexTmpVar = RepresentOpAsLocalVar(op2, node, &node->Op(2)); indexTmpVarNum = indexTmpVar->AsLclVarCommon()->GetLclNum(); JITDUMP("[DecomposeHWIntrinsicGetElement]: Saving op2 tree to a temp var:\n"); DISPTREERANGE(Range(), indexTmpVar); Range().Remove(indexTmpVar); op2 = node->Op(2); } // Create: // loResult = GT_HWINTRINSIC{GetElement}[int](tmp_simd_var, index * 2) GenTree* simdTmpVar1 = simdTmpVar; GenTree* indexTimesTwo1; if (indexIsConst) { // Reuse the existing index constant node. 
indexTimesTwo1 = op2; Range().Remove(indexTimesTwo1); indexTimesTwo1->AsIntCon()->SetIconValue(index * 2); Range().InsertBefore(node, simdTmpVar1, indexTimesTwo1); } else { GenTree* indexTmpVar1 = indexTmpVar; GenTree* two1 = m_compiler->gtNewIconNode(2, TYP_INT); indexTimesTwo1 = m_compiler->gtNewOperNode(GT_MUL, TYP_INT, indexTmpVar1, two1); Range().InsertBefore(node, simdTmpVar1, indexTmpVar1, two1, indexTimesTwo1); } GenTree* loResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, simdTmpVar1, indexTimesTwo1, node->GetHWIntrinsicId(), CORINFO_TYPE_INT, simdSize); Range().InsertBefore(node, loResult); // Create: // hiResult = GT_HWINTRINSIC{GetElement}[int](tmp_simd_var, index * 2 + 1) GenTree* simdTmpVar2 = m_compiler->gtNewLclLNode(simdTmpVarNum, op1->TypeGet()); GenTree* indexTimesTwoPlusOne; if (indexIsConst) { indexTimesTwoPlusOne = m_compiler->gtNewIconNode(index * 2 + 1, TYP_INT); Range().InsertBefore(node, simdTmpVar2, indexTimesTwoPlusOne); } else { GenTree* indexTmpVar2 = m_compiler->gtNewLclLNode(indexTmpVarNum, TYP_INT); GenTree* two2 = m_compiler->gtNewIconNode(2, TYP_INT); GenTree* indexTimesTwo2 = m_compiler->gtNewOperNode(GT_MUL, TYP_INT, indexTmpVar2, two2); GenTree* one = m_compiler->gtNewIconNode(1, TYP_INT); indexTimesTwoPlusOne = m_compiler->gtNewOperNode(GT_ADD, TYP_INT, indexTimesTwo2, one); Range().InsertBefore(node, simdTmpVar2, indexTmpVar2, two2, indexTimesTwo2); Range().InsertBefore(node, one, indexTimesTwoPlusOne); } GenTree* hiResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_INT, simdTmpVar2, indexTimesTwoPlusOne, node->GetHWIntrinsicId(), CORINFO_TYPE_INT, simdSize); Range().InsertBefore(node, hiResult); // Done with the original tree; remove it. Range().Remove(node); return FinalizeDecomposition(use, loResult, hiResult, hiResult); } #endif // FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // OptimizeCastFromDecomposedLong: optimizes a cast from GT_LONG by discarding // the high part of the source and, if the cast is to INT, the cast node itself. // Accounts for side effects and marks nodes unused as neccessary. // // Only accepts casts to integer types that are not long. // Does not optimize checked casts. // // Arguments: // cast - the cast tree that has a GT_LONG node as its operand. // nextNode - the next candidate for decomposition. // // Return Value: // The next node to process in DecomposeRange: "nextNode->gtNext" if // "cast == nextNode", simply "nextNode" otherwise. // // Notes: // Because "nextNode" usually is "cast", and this method may remove "cast" // from the linear order, it needs to return the updated "nextNode". Instead // of receiving it as an argument, it could assume that "nextNode" is always // "cast->CastOp()->gtNext", but not making that assumption seems better. // GenTree* DecomposeLongs::OptimizeCastFromDecomposedLong(GenTreeCast* cast, GenTree* nextNode) { GenTreeOp* src = cast->CastOp()->AsOp(); var_types dstType = cast->CastToType(); assert(src->OperIs(GT_LONG)); assert(genActualType(dstType) == TYP_INT); if (cast->gtOverflow()) { return nextNode; } GenTree* loSrc = src->gtGetOp1(); GenTree* hiSrc = src->gtGetOp2(); JITDUMP("Optimizing a truncating cast [%06u] from decomposed LONG [%06u]\n", cast->gtTreeID, src->gtTreeID); INDEBUG(GenTree* treeToDisplay = cast); // TODO-CQ: we could go perform this removal transitively. // See also identical code in shift decomposition. 
if ((hiSrc->gtFlags & (GTF_ALL_EFFECT | GTF_SET_FLAGS)) == 0) { JITDUMP("Removing the HI part of [%06u] and marking its operands unused:\n", src->gtTreeID); DISPNODE(hiSrc); Range().Remove(hiSrc, /* markOperandsUnused */ true); } else { JITDUMP("The HI part of [%06u] has side effects, marking it unused\n", src->gtTreeID); hiSrc->SetUnusedValue(); } JITDUMP("Removing the LONG source:\n"); DISPNODE(src); Range().Remove(src); if (varTypeIsSmall(dstType)) { JITDUMP("Cast is to a small type, keeping it, the new source is [%06u]\n", loSrc->gtTreeID); cast->CastOp() = loSrc; } else { LIR::Use useOfCast; if (Range().TryGetUse(cast, &useOfCast)) { useOfCast.ReplaceWith(loSrc); } else { loSrc->SetUnusedValue(); } if (nextNode == cast) { nextNode = nextNode->gtNext; } INDEBUG(treeToDisplay = loSrc); JITDUMP("Removing the cast:\n"); DISPNODE(cast); Range().Remove(cast); } JITDUMP("Final result:\n") DISPTREERANGE(Range(), treeToDisplay); return nextNode; } //------------------------------------------------------------------------ // StoreNodeToVar: Check if the user is a STORE_LCL_VAR, and if it isn't, // store the node to a var. Then decompose the new LclVar. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // GenTree* DecomposeLongs::StoreNodeToVar(LIR::Use& use) { if (use.IsDummyUse()) return use.Def()->gtNext; GenTree* tree = use.Def(); GenTree* user = use.User(); if (user->OperGet() == GT_STORE_LCL_VAR) { // If parent is already a STORE_LCL_VAR, we can skip it if // it is already marked as lvIsMultiRegRet. unsigned varNum = user->AsLclVarCommon()->GetLclNum(); if (m_compiler->lvaTable[varNum].lvIsMultiRegRet) { return tree->gtNext; } else if (!m_compiler->lvaTable[varNum].lvPromoted) { // If var wasn't promoted, we can just set lvIsMultiRegRet. m_compiler->lvaTable[varNum].lvIsMultiRegRet = true; return tree->gtNext; } } // Otherwise, we need to force var = call() unsigned varNum = use.ReplaceWithLclVar(m_compiler); m_compiler->lvaTable[varNum].lvIsMultiRegRet = true; // Decompose the new LclVar use return DecomposeLclVar(use); } //------------------------------------------------------------------------ // Check is op already local var, if not store it to local. // // Arguments: // op - GenTree* to represent as local variable // user - user of op // edge - edge from user to op // // Return Value: // op represented as local var // GenTree* DecomposeLongs::RepresentOpAsLocalVar(GenTree* op, GenTree* user, GenTree** edge) { if (op->OperGet() == GT_LCL_VAR) { return op; } else { LIR::Use opUse(Range(), edge, user); opUse.ReplaceWithLclVar(m_compiler); return *edge; } } //------------------------------------------------------------------------ // DecomposeLongs::EnsureIntSized: // Checks to see if the given node produces an int-sized value and // performs the appropriate widening if it does not. // // Arguments: // node - The node that may need to be widened. // signExtend - True if the value should be sign-extended; false if it // should be zero-extended. // // Return Value: // The node that produces the widened value. 
GenTree* DecomposeLongs::EnsureIntSized(GenTree* node, bool signExtend) { assert(node != nullptr); if (!varTypeIsSmall(node)) { assert(genTypeSize(node) == genTypeSize(TYP_INT)); return node; } if (node->OperIs(GT_LCL_VAR) && !m_compiler->lvaTable[node->AsLclVarCommon()->GetLclNum()].lvNormalizeOnLoad()) { node->gtType = TYP_INT; return node; } GenTree* const cast = m_compiler->gtNewCastNode(TYP_INT, node, !signExtend, node->TypeGet()); Range().InsertAfter(node, cast); return cast; } //------------------------------------------------------------------------ // GetHiOper: Convert arithmetic operator to "high half" operator of decomposed node. // // Arguments: // oper - operator to map // // Return Value: // mapped operator // // static genTreeOps DecomposeLongs::GetHiOper(genTreeOps oper) { switch (oper) { case GT_ADD: return GT_ADD_HI; break; case GT_SUB: return GT_SUB_HI; break; case GT_OR: return GT_OR; break; case GT_AND: return GT_AND; break; case GT_XOR: return GT_XOR; break; default: assert(!"GetHiOper called for invalid oper"); return GT_NONE; } } //------------------------------------------------------------------------ // GetLoOper: Convert arithmetic operator to "low half" operator of decomposed node. // // Arguments: // oper - operator to map // // Return Value: // mapped operator // // static genTreeOps DecomposeLongs::GetLoOper(genTreeOps oper) { switch (oper) { case GT_ADD: return GT_ADD_LO; break; case GT_SUB: return GT_SUB_LO; break; case GT_OR: return GT_OR; break; case GT_AND: return GT_AND; break; case GT_XOR: return GT_XOR; break; default: assert(!"GetLoOper called for invalid oper"); return GT_NONE; } } //------------------------------------------------------------------------ // PromoteLongVars: "Struct promote" all register candidate longs as if they are structs of two ints. // // Arguments: // None. // // Return Value: // None. // void DecomposeLongs::PromoteLongVars() { if (!m_compiler->compEnregLocals()) { return; } // The lvaTable might grow as we grab temps. Make a local copy here. unsigned startLvaCount = m_compiler->lvaCount; for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++) { LclVarDsc* varDsc = m_compiler->lvaGetDesc(lclNum); if (!varTypeIsLong(varDsc)) { continue; } if (varDsc->lvDoNotEnregister) { continue; } if (varDsc->lvRefCnt() == 0) { continue; } if (varDsc->lvIsStructField) { continue; } if (m_compiler->fgNoStructPromotion) { continue; } if (m_compiler->fgNoStructParamPromotion && varDsc->lvIsParam) { continue; } assert(!varDsc->lvIsMultiRegArgOrRet()); varDsc->lvFieldCnt = 2; varDsc->lvFieldLclStart = m_compiler->lvaCount; varDsc->lvPromoted = true; varDsc->lvContainsHoles = false; JITDUMP("\nPromoting long local V%02u:", lclNum); bool isParam = varDsc->lvIsParam; for (unsigned index = 0; index < 2; ++index) { // Grab the temp for the field local. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG char buf[200]; sprintf_s(buf, sizeof(buf), "%s V%02u.%s (fldOffset=0x%x)", "field", lclNum, index == 0 ? "lo" : "hi", index * 4); // We need to copy 'buf' as lvaGrabTemp() below caches a copy to its argument. size_t len = strlen(buf) + 1; char* bufp = m_compiler->getAllocator(CMK_DebugOnly).allocate<char>(len); strcpy_s(bufp, len, buf); #endif unsigned varNum = m_compiler->lvaGrabTemp(false DEBUGARG(bufp)); // Lifetime of field locals might span multiple BBs, so // they are long lifetime temps. 
LclVarDsc* fieldVarDsc = m_compiler->lvaGetDesc(varNum); fieldVarDsc->lvType = TYP_INT; fieldVarDsc->lvExactSize = genTypeSize(TYP_INT); fieldVarDsc->lvIsStructField = true; fieldVarDsc->lvFldOffset = (unsigned char)(index * genTypeSize(TYP_INT)); fieldVarDsc->lvFldOrdinal = (unsigned char)index; fieldVarDsc->lvParentLcl = lclNum; // Currently we do not support enregistering incoming promoted aggregates with more than one field. if (isParam) { fieldVarDsc->lvIsParam = true; m_compiler->lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LongParamField)); } } } #ifdef DEBUG if (m_compiler->verbose) { printf("\nlvaTable after PromoteLongVars\n"); m_compiler->lvaTableDump(); } #endif // DEBUG } #endif // !defined(TARGET_64BIT)
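Not part of the decomposelongs.cpp source above — a minimal C# sketch, outside the JIT, of the lo/hi split that the arithmetic-decomposition comments describe: the low 32-bit add produces a carry that the high 32-bit add consumes (GT_ADD_LO / GT_ADD_HI rely on the hardware carry flag for this rather than an explicit compare). The helper name `Add64ViaHalves` and the sample values are illustrative only.

```csharp
using System;

class LongDecompositionSketch
{
    // Adds two 64-bit values using only 32-bit halves, mirroring the
    // GT_ADD_LO / GT_ADD_HI split: the low add produces a carry that the
    // high add consumes.
    static ulong Add64ViaHalves(ulong x, ulong y)
    {
        uint xLo = (uint)x, xHi = (uint)(x >> 32);
        uint yLo = (uint)y, yHi = (uint)(y >> 32);

        uint lo = xLo + yLo;              // low half (GT_ADD_LO)
        uint carry = lo < xLo ? 1u : 0u;  // carry out of the low half
        uint hi = xHi + yHi + carry;      // high half (GT_ADD_HI)

        return ((ulong)hi << 32) | lo;
    }

    static void Main()
    {
        ulong a = 0x1_FFFF_FFFFUL, b = 0x2_0000_0001UL;
        Console.WriteLine(Add64ViaHalves(a, b) == a + b); // True
    }
}
```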
-1
dotnet/runtime
66,204
Delete `compUnsafeCastUsed`
Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
SingleAccretion
2022-03-04T18:35:50Z
2022-03-04T22:59:19Z
136b312bc4b471c92eeaf25ded3d0b4dec3afd13
da51f60525f96cceef3484eaa140c3b84e7484ab
Delete `compUnsafeCastUsed`. Unused. [No diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1645724&view=ms.vss-build-web.run-extensions-tab).
./src/tests/JIT/SIMD/VectorMatrix_r.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize /> </PropertyGroup> <ItemGroup> <Compile Include="VectorMatrix.cs" /> <Compile Include="VectorUtil.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize /> </PropertyGroup> <ItemGroup> <Compile Include="VectorMatrix.cs" /> <Compile Include="VectorUtil.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and after parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Later, when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, since doing so lets them avoid generating backtracking-related code when the child is known not to backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler's / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, a late optimization can make a transformation that would enable a node to be made atomic, but we don't run that phase of the optimizer again, so the node is left non-atomic. The source generator then does its own analysis and concludes that the node should be treated as atomic. That leads to problems: the node itself has unnecessary backtracking code generated for it, while the parent rightly assumes there is no backtracking and either doesn't generate the code needed to compensate for it or generates code that causes problems (e.g. the source generator uses this information to decide whether it can output scopes). Our outer-loop tests, which source-generate our full regex corpus, caught a case where this was happening. This change makes two fixes, each of which is sufficient on its own to address this particular case, and each of which also brings other benefits: 1. When rendering a single-char loop, the source generator now consults the computed atomicity table to determine whether the rest of the source generation views the loop as atomic; if it does, the loop is rendered atomically instead. 2. The ending-backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also atomic) now also recurs into lookarounds. This change also removes some duplicated code for reducing lookarounds and renames some stale methods.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and after parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Later, when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, since doing so lets them avoid generating backtracking-related code when the child is known not to backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler's / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, a late optimization can make a transformation that would enable a node to be made atomic, but we don't run that phase of the optimizer again, so the node is left non-atomic. The source generator then does its own analysis and concludes that the node should be treated as atomic. That leads to problems: the node itself has unnecessary backtracking code generated for it, while the parent rightly assumes there is no backtracking and either doesn't generate the code needed to compensate for it or generates code that causes problems (e.g. the source generator uses this information to decide whether it can output scopes). Our outer-loop tests, which source-generate our full regex corpus, caught a case where this was happening. This change makes two fixes, each of which is sufficient on its own to address this particular case, and each of which also brings other benefits: 1. When rendering a single-char loop, the source generator now consults the computed atomicity table to determine whether the rest of the source generation views the loop as atomic; if it does, the loop is rendered atomically instead. 2. The ending-backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also atomic) now also recurs into lookarounds. This change also removes some duplicated code for reducing lookarounds and renames some stale methods.
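Not from the PR itself — a small illustration, using only the public Regex API, of the atomicity idea the description above relies on: a single-character loop can be treated as atomic when whatever follows it can never match a character the loop consumed, whereas it must keep its backtracking when the following construct overlaps with the loop's set. The patterns below are examples chosen here for illustration, not taken from the PR.

```csharp
using System;
using System.Text.RegularExpressions;

class AtomicLoopIllustration
{
    static void Main()
    {
        // ';' can never match \d, so giving characters back from \d+ can never
        // help; an analysis like the one described above may mark such a loop
        // atomic and skip emitting backtracking code for it.
        Console.WriteLine(Regex.IsMatch("12345;", @"\d+;")); // True

        // '5' is also matched by \d, so \d+ must be able to give a character
        // back (it first consumes "12345", then backtracks so the trailing '5'
        // can match); this loop cannot be made atomic.
        Console.WriteLine(Regex.IsMatch("12345", @"\d+5"));  // True
    }
}
```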
./src/libraries/System.Text.RegularExpressions/gen/RegexGenerator.Emitter.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Buffers.Binary; using System.CodeDom.Compiler; using System.Collections; using System.Collections.Generic; using System.Collections.Immutable; using System.Diagnostics; using System.Globalization; using System.IO; using System.Linq; using System.Runtime.InteropServices; using System.Threading; using Microsoft.CodeAnalysis; using Microsoft.CodeAnalysis.CSharp; // NOTE: The logic in this file is largely a copy of logic in RegexCompiler, emitting C# instead of MSIL. // Most changes made to this file should be kept in sync, so far as bug fixes and relevant optimizations // are concerned. namespace System.Text.RegularExpressions.Generator { public partial class RegexGenerator { /// <summary>Code for a [GeneratedCode] attribute to put on the top-level generated members.</summary> private static readonly string s_generatedCodeAttribute = $"[global::System.CodeDom.Compiler.GeneratedCodeAttribute(\"{typeof(RegexGenerator).Assembly.GetName().Name}\", \"{typeof(RegexGenerator).Assembly.GetName().Version}\")]"; /// <summary>Header comments and usings to include at the top of every generated file.</summary> private static readonly string[] s_headers = new string[] { "// <auto-generated/>", "#nullable enable", "#pragma warning disable CS0162 // Unreachable code", "#pragma warning disable CS0164 // Unreferenced label", "#pragma warning disable CS0219 // Variable assigned but never used", "", }; /// <summary>Generates the code for one regular expression class.</summary> private static (string, ImmutableArray<Diagnostic>) EmitRegexType(RegexType regexClass, bool allowUnsafe) { var sb = new StringBuilder(1024); var writer = new IndentedTextWriter(new StringWriter(sb)); // Emit the namespace if (!string.IsNullOrWhiteSpace(regexClass.Namespace)) { writer.WriteLine($"namespace {regexClass.Namespace}"); writer.WriteLine("{"); writer.Indent++; } // Emit containing types RegexType? parent = regexClass.ParentClass; var parentClasses = new Stack<string>(); while (parent is not null) { parentClasses.Push($"partial {parent.Keyword} {parent.Name}"); parent = parent.ParentClass; } while (parentClasses.Count != 0) { writer.WriteLine($"{parentClasses.Pop()}"); writer.WriteLine("{"); writer.Indent++; } // Emit the direct parent type writer.WriteLine($"partial {regexClass.Keyword} {regexClass.Name}"); writer.WriteLine("{"); writer.Indent++; // Generate a name to describe the regex instance. This includes the method name // the user provided and a non-randomized (for determinism) hash of it to try to make // the name that much harder to predict. Debug.Assert(regexClass.Method is not null); string generatedName = $"GeneratedRegex_{regexClass.Method.MethodName}_"; generatedName += ComputeStringHash(generatedName).ToString("X"); // Generate the regex type ImmutableArray<Diagnostic> diagnostics = EmitRegexMethod(writer, regexClass.Method, generatedName, allowUnsafe); while (writer.Indent != 0) { writer.Indent--; writer.WriteLine("}"); } writer.Flush(); return (sb.ToString(), diagnostics); // FNV-1a hash function. The actual algorithm used doesn't matter; just something simple // to create a deterministic, pseudo-random value that's based on input text. 
static uint ComputeStringHash(string s) { uint hashCode = 2166136261; foreach (char c in s) { hashCode = (c ^ hashCode) * 16777619; } return hashCode; } } /// <summary>Gets whether a given regular expression method is supported by the code generator.</summary> private static bool SupportsCodeGeneration(RegexMethod rm, out string? reason) { RegexNode root = rm.Tree.Root; if (!root.SupportsCompilation(out reason)) { return false; } if (ExceedsMaxDepthForSimpleCodeGeneration(root, allowedDepth: 40)) { // Deep RegexNode trees can result in emitting C# code that exceeds C# compiler // limitations, leading to "CS8078: An expression is too long or complex to compile". // Place an artificial limit on max tree depth in order to mitigate such issues. // The allowed depth can be tweaked as needed;its exceedingly rare to find // expressions with such deep trees. reason = "the regex will result in code that may exceed C# compiler limits"; return false; } return true; static bool ExceedsMaxDepthForSimpleCodeGeneration(RegexNode node, int allowedDepth) { if (allowedDepth <= 0) { return true; } int childCount = node.ChildCount(); for (int i = 0; i < childCount; i++) { if (ExceedsMaxDepthForSimpleCodeGeneration(node.Child(i), allowedDepth - 1)) { return true; } } return false; } } /// <summary>Generates the code for a regular expression method.</summary> private static ImmutableArray<Diagnostic> EmitRegexMethod(IndentedTextWriter writer, RegexMethod rm, string id, bool allowUnsafe) { string patternExpression = Literal(rm.Pattern); string optionsExpression = Literal(rm.Options); string timeoutExpression = rm.MatchTimeout == Timeout.Infinite ? "global::System.Threading.Timeout.InfiniteTimeSpan" : $"global::System.TimeSpan.FromMilliseconds({rm.MatchTimeout.ToString(CultureInfo.InvariantCulture)})"; writer.WriteLine(s_generatedCodeAttribute); writer.WriteLine($"{rm.Modifiers} global::System.Text.RegularExpressions.Regex {rm.MethodName}() => {id}.Instance;"); writer.WriteLine(); writer.WriteLine(s_generatedCodeAttribute); writer.WriteLine("[global::System.ComponentModel.EditorBrowsable(global::System.ComponentModel.EditorBrowsableState.Never)]"); writer.WriteLine($"{(writer.Indent != 0 ? "private" : "internal")} sealed class {id} : global::System.Text.RegularExpressions.Regex"); writer.WriteLine("{"); writer.Write(" public static global::System.Text.RegularExpressions.Regex Instance { get; } = "); // If we can't support custom generation for this regex, spit out a Regex constructor call. if (!SupportsCodeGeneration(rm, out string? 
reason)) { writer.WriteLine(); writer.WriteLine($"// Cannot generate Regex-derived implementation because {reason}."); writer.WriteLine($"new global::System.Text.RegularExpressions.Regex({patternExpression}, {optionsExpression}, {timeoutExpression});"); writer.WriteLine("}"); return ImmutableArray.Create(Diagnostic.Create(DiagnosticDescriptors.LimitedSourceGeneration, rm.MethodSyntax.GetLocation())); } AnalysisResults analysis = RegexTreeAnalyzer.Analyze(rm.Tree); writer.WriteLine($"new {id}();"); writer.WriteLine(); writer.WriteLine($" private {id}()"); writer.WriteLine($" {{"); writer.WriteLine($" base.pattern = {patternExpression};"); writer.WriteLine($" base.roptions = {optionsExpression};"); writer.WriteLine($" base.internalMatchTimeout = {timeoutExpression};"); writer.WriteLine($" base.factory = new RunnerFactory();"); if (rm.Tree.CaptureNumberSparseMapping is not null) { writer.Write(" base.Caps = new global::System.Collections.Hashtable {"); AppendHashtableContents(writer, rm.Tree.CaptureNumberSparseMapping); writer.WriteLine(" };"); } if (rm.Tree.CaptureNameToNumberMapping is not null) { writer.Write(" base.CapNames = new global::System.Collections.Hashtable {"); AppendHashtableContents(writer, rm.Tree.CaptureNameToNumberMapping); writer.WriteLine(" };"); } if (rm.Tree.CaptureNames is not null) { writer.Write(" base.capslist = new string[] {"); string separator = ""; foreach (string s in rm.Tree.CaptureNames) { writer.Write(separator); writer.Write(Literal(s)); separator = ", "; } writer.WriteLine(" };"); } writer.WriteLine($" base.capsize = {rm.Tree.CaptureCount};"); writer.WriteLine($" }}"); writer.WriteLine(" "); writer.WriteLine($" private sealed class RunnerFactory : global::System.Text.RegularExpressions.RegexRunnerFactory"); writer.WriteLine($" {{"); writer.WriteLine($" protected override global::System.Text.RegularExpressions.RegexRunner CreateInstance() => new Runner();"); writer.WriteLine(); writer.WriteLine($" private sealed class Runner : global::System.Text.RegularExpressions.RegexRunner"); writer.WriteLine($" {{"); // Main implementation methods writer.WriteLine(" // Description:"); DescribeExpression(writer, rm.Tree.Root.Child(0), " // ", analysis); // skip implicit root capture writer.WriteLine(); writer.WriteLine($" protected override void Scan(global::System.ReadOnlySpan<char> text)"); writer.WriteLine($" {{"); writer.Indent += 4; EmitScan(writer, rm, id); writer.Indent -= 4; writer.WriteLine($" }}"); writer.WriteLine(); writer.WriteLine($" private bool TryFindNextPossibleStartingPosition(global::System.ReadOnlySpan<char> inputSpan)"); writer.WriteLine($" {{"); writer.Indent += 4; RequiredHelperFunctions requiredHelpers = EmitTryFindNextPossibleStartingPosition(writer, rm, id); writer.Indent -= 4; writer.WriteLine($" }}"); writer.WriteLine(); if (allowUnsafe) { writer.WriteLine($" [global::System.Runtime.CompilerServices.SkipLocalsInit]"); } writer.WriteLine($" private bool TryMatchAtCurrentPosition(global::System.ReadOnlySpan<char> inputSpan)"); writer.WriteLine($" {{"); writer.Indent += 4; requiredHelpers |= EmitTryMatchAtCurrentPosition(writer, rm, id, analysis); writer.Indent -= 4; writer.WriteLine($" }}"); if ((requiredHelpers & RequiredHelperFunctions.IsWordChar) != 0) { writer.WriteLine(); writer.WriteLine($" /// <summary>Determines whether the character is part of the [\\w] set.</summary>"); writer.WriteLine($" [global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)]"); 
writer.WriteLine($" private static bool IsWordChar(char ch)"); writer.WriteLine($" {{"); writer.WriteLine($" global::System.ReadOnlySpan<byte> ascii = new byte[]"); writer.WriteLine($" {{"); writer.WriteLine($" 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x03,"); writer.WriteLine($" 0xFE, 0xFF, 0xFF, 0x87, 0xFE, 0xFF, 0xFF, 0x07"); writer.WriteLine($" }};"); writer.WriteLine(); writer.WriteLine($" int chDiv8 = ch >> 3;"); writer.WriteLine($" return (uint)chDiv8 < (uint)ascii.Length ?"); writer.WriteLine($" (ascii[chDiv8] & (1 << (ch & 0x7))) != 0 :"); writer.WriteLine($" global::System.Globalization.CharUnicodeInfo.GetUnicodeCategory(ch) switch"); writer.WriteLine($" {{"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.UppercaseLetter or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.LowercaseLetter or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.TitlecaseLetter or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.ModifierLetter or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.OtherLetter or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.NonSpacingMark or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.DecimalDigitNumber or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.ConnectorPunctuation => true,"); writer.WriteLine($" _ => false,"); writer.WriteLine($" }};"); writer.WriteLine($" }}"); } if ((requiredHelpers & RequiredHelperFunctions.IsBoundary) != 0) { writer.WriteLine(); writer.WriteLine($" /// <summary>Determines whether the character at the specified index is a boundary.</summary>"); writer.WriteLine($" [global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)]"); writer.WriteLine($" private static bool IsBoundary(global::System.ReadOnlySpan<char> inputSpan, int index)"); writer.WriteLine($" {{"); writer.WriteLine($" int indexM1 = index - 1;"); writer.WriteLine($" return ((uint)indexM1 < (uint)inputSpan.Length && IsBoundaryWordChar(inputSpan[indexM1])) !="); writer.WriteLine($" ((uint)index < (uint)inputSpan.Length && IsBoundaryWordChar(inputSpan[index]));"); writer.WriteLine(); writer.WriteLine($" static bool IsBoundaryWordChar(char ch) =>"); writer.WriteLine($" IsWordChar(ch) || (ch == '\\u200C' | ch == '\\u200D');"); writer.WriteLine($" }}"); } if ((requiredHelpers & RequiredHelperFunctions.IsECMABoundary) != 0) { writer.WriteLine(); writer.WriteLine($" /// <summary>Determines whether the character at the specified index is a boundary.</summary>"); writer.WriteLine($" [global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)]"); writer.WriteLine($" private static bool IsECMABoundary(global::System.ReadOnlySpan<char> inputSpan, int index)"); writer.WriteLine($" {{"); writer.WriteLine($" int indexM1 = index - 1;"); writer.WriteLine($" return ((uint)indexM1 < (uint)inputSpan.Length && IsECMAWordChar(inputSpan[indexM1])) !="); writer.WriteLine($" ((uint)index < (uint)inputSpan.Length && IsECMAWordChar(inputSpan[index]));"); writer.WriteLine(); writer.WriteLine($" static bool IsECMAWordChar(char ch) =>"); writer.WriteLine($" ((((uint)ch - 'A') & ~0x20) < 26) || // ASCII letter"); writer.WriteLine($" (((uint)ch - '0') < 10) || // digit"); writer.WriteLine($" ch == '_' || // underscore"); writer.WriteLine($" ch == '\\u0130'; // latin capital letter I with dot above"); 
writer.WriteLine($" }}"); } writer.WriteLine($" }}"); writer.WriteLine($" }}"); writer.WriteLine("}"); return ImmutableArray<Diagnostic>.Empty; static void AppendHashtableContents(IndentedTextWriter writer, Hashtable ht) { IDictionaryEnumerator en = ht.GetEnumerator(); string separator = ""; while (en.MoveNext()) { writer.Write(separator); separator = ", "; writer.Write(" { "); if (en.Key is int key) { writer.Write(key); } else { writer.Write($"\"{en.Key}\""); } writer.Write($", {en.Value} }} "); } } } /// <summary>Emits the body of the Scan method override.</summary> private static void EmitScan(IndentedTextWriter writer, RegexMethod rm, string id) { using (EmitBlock(writer, "while (TryFindNextPossibleStartingPosition(text))")) { if (rm.MatchTimeout != Timeout.Infinite) { writer.WriteLine("base.CheckTimeout();"); writer.WriteLine(); } writer.WriteLine("// If we find a match on the current position, or we have reached the end of the input, we are done."); using (EmitBlock(writer, "if (TryMatchAtCurrentPosition(text) || base.runtextpos == text.Length)")) { writer.WriteLine("return;"); } writer.WriteLine(); writer.WriteLine("base.runtextpos++;"); } } /// <summary>Emits the body of the TryFindNextPossibleStartingPosition.</summary> private static RequiredHelperFunctions EmitTryFindNextPossibleStartingPosition(IndentedTextWriter writer, RegexMethod rm, string id) { RegexOptions options = (RegexOptions)rm.Options; RegexTree regexTree = rm.Tree; bool hasTextInfo = false; RequiredHelperFunctions requiredHelpers = RequiredHelperFunctions.None; // In some cases, we need to emit declarations at the beginning of the method, but we only discover we need them later. // To handle that, we build up a collection of all the declarations to include, track where they should be inserted, // and then insert them at that position once everything else has been output. var additionalDeclarations = new HashSet<string>(); // Emit locals initialization writer.WriteLine("int pos = base.runtextpos;"); writer.Flush(); int additionalDeclarationsPosition = ((StringWriter)writer.InnerWriter).GetStringBuilder().Length; int additionalDeclarationsIndent = writer.Indent; writer.WriteLine(); // Generate length check. If the input isn't long enough to possibly match, fail quickly. // It's rare for min required length to be 0, so we don't bother special-casing the check, // especially since we want the "return false" code regardless. int minRequiredLength = rm.Tree.FindOptimizations.MinRequiredLength; Debug.Assert(minRequiredLength >= 0); string clause = minRequiredLength switch { 0 => "if (pos <= inputSpan.Length)", 1 => "if (pos < inputSpan.Length)", _ => $"if (pos < inputSpan.Length - {minRequiredLength - 1})" }; using (EmitBlock(writer, clause)) { // Emit any anchors. if (!EmitAnchors()) { // Either anchors weren't specified, or they don't completely root all matches to a specific location. // If whatever search operation we need to perform entails case-insensitive operations // that weren't already handled via creation of sets, we need to get an store the // TextInfo object to use (unless RegexOptions.CultureInvariant was specified). EmitTextInfo(writer, ref hasTextInfo, rm); // Emit the code for whatever find mode has been determined. 
switch (regexTree.FindOptimizations.FindMode) { case FindNextStartingPositionMode.LeadingPrefix_LeftToRight_CaseSensitive: Debug.Assert(!string.IsNullOrEmpty(regexTree.FindOptimizations.LeadingCaseSensitivePrefix)); EmitIndexOf(regexTree.FindOptimizations.LeadingCaseSensitivePrefix); break; case FindNextStartingPositionMode.FixedSets_LeftToRight_CaseSensitive: case FindNextStartingPositionMode.FixedSets_LeftToRight_CaseInsensitive: case FindNextStartingPositionMode.LeadingSet_LeftToRight_CaseSensitive: case FindNextStartingPositionMode.LeadingSet_LeftToRight_CaseInsensitive: Debug.Assert(regexTree.FindOptimizations.FixedDistanceSets is { Count: > 0 }); EmitFixedSet(); break; case FindNextStartingPositionMode.LiteralAfterLoop_LeftToRight_CaseSensitive: Debug.Assert(regexTree.FindOptimizations.LiteralAfterLoop is not null); EmitLiteralAfterAtomicLoop(); break; default: Debug.Fail($"Unexpected mode: {regexTree.FindOptimizations.FindMode}"); goto case FindNextStartingPositionMode.NoSearch; case FindNextStartingPositionMode.NoSearch: writer.WriteLine("return true;"); break; } } } writer.WriteLine(); const string NoStartingPositionFound = "NoStartingPositionFound"; writer.WriteLine("// No starting position found"); writer.WriteLine($"{NoStartingPositionFound}:"); writer.WriteLine("base.runtextpos = inputSpan.Length;"); writer.WriteLine("return false;"); // We're done. Patch up any additional declarations. ReplaceAdditionalDeclarations(writer, additionalDeclarations, additionalDeclarationsPosition, additionalDeclarationsIndent); return requiredHelpers; // Emit a goto for the specified label. void Goto(string label) => writer.WriteLine($"goto {label};"); // Emits any anchors. Returns true if the anchor roots any match to a specific location and thus no further // searching is required; otherwise, false. bool EmitAnchors() { // Anchors that fully implement TryFindNextPossibleStartingPosition, with a check that leads to immediate success or failure determination. switch (regexTree.FindOptimizations.FindMode) { case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_Beginning: writer.WriteLine("// Beginning \\A anchor"); using (EmitBlock(writer, "if (pos > 0)")) { Goto(NoStartingPositionFound); } writer.WriteLine("return true;"); return true; case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_Start: writer.WriteLine("// Start \\G anchor"); using (EmitBlock(writer, "if (pos > base.runtextstart)")) { Goto(NoStartingPositionFound); } writer.WriteLine("return true;"); return true; case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_EndZ: writer.WriteLine("// Leading end \\Z anchor"); using (EmitBlock(writer, "if (pos < inputSpan.Length - 1)")) { writer.WriteLine("base.runtextpos = inputSpan.Length - 1;"); } writer.WriteLine("return true;"); return true; case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_End: writer.WriteLine("// Leading end \\z anchor"); using (EmitBlock(writer, "if (pos < inputSpan.Length)")) { writer.WriteLine("base.runtextpos = inputSpan.Length;"); } writer.WriteLine("return true;"); return true; case FindNextStartingPositionMode.TrailingAnchor_FixedLength_LeftToRight_EndZ: // Jump to the end, minus the min required length, which in this case is actually the fixed length, minus 1 (for a possible ending \n). 
writer.WriteLine("// Trailing end \\Z anchor with fixed-length match"); using (EmitBlock(writer, $"if (pos < inputSpan.Length - {regexTree.FindOptimizations.MinRequiredLength + 1})")) { writer.WriteLine($"base.runtextpos = inputSpan.Length - {regexTree.FindOptimizations.MinRequiredLength + 1};"); } writer.WriteLine("return true;"); return true; case FindNextStartingPositionMode.TrailingAnchor_FixedLength_LeftToRight_End: // Jump to the end, minus the min required length, which in this case is actually the fixed length. writer.WriteLine("// Trailing end \\z anchor with fixed-length match"); using (EmitBlock(writer, $"if (pos < inputSpan.Length - {regexTree.FindOptimizations.MinRequiredLength})")) { writer.WriteLine($"base.runtextpos = inputSpan.Length - {regexTree.FindOptimizations.MinRequiredLength};"); } writer.WriteLine("return true;"); return true; } // Now handle anchors that boost the position but may not determine immediate success or failure. switch (regexTree.FindOptimizations.LeadingAnchor) { case RegexNodeKind.Bol: // Optimize the handling of a Beginning-Of-Line (BOL) anchor. BOL is special, in that unlike // other anchors like Beginning, there are potentially multiple places a BOL can match. So unlike // the other anchors, which all skip all subsequent processing if found, with BOL we just use it // to boost our position to the next line, and then continue normally with any searches. writer.WriteLine("// Beginning-of-line anchor"); using (EmitBlock(writer, "if (pos > 0 && inputSpan[pos - 1] != '\\n')")) { writer.WriteLine("int newlinePos = global::System.MemoryExtensions.IndexOf(inputSpan.Slice(pos), '\\n');"); using (EmitBlock(writer, "if ((uint)newlinePos > inputSpan.Length - pos - 1)")) { Goto(NoStartingPositionFound); } writer.WriteLine("pos = newlinePos + pos + 1;"); } writer.WriteLine(); break; } switch (regexTree.FindOptimizations.TrailingAnchor) { case RegexNodeKind.End when regexTree.FindOptimizations.MaxPossibleLength is int maxLength: writer.WriteLine("// End \\z anchor with maximum-length match"); using (EmitBlock(writer, $"if (pos < inputSpan.Length - {maxLength})")) { writer.WriteLine($"pos = inputSpan.Length - {maxLength};"); } writer.WriteLine(); break; case RegexNodeKind.EndZ when regexTree.FindOptimizations.MaxPossibleLength is int maxLength: writer.WriteLine("// End \\Z anchor with maximum-length match"); using (EmitBlock(writer, $"if (pos < inputSpan.Length - {maxLength + 1})")) { writer.WriteLine($"pos = inputSpan.Length - {maxLength + 1};"); } writer.WriteLine(); break; } return false; } // Emits a case-sensitive prefix search for a string at the beginning of the pattern. void EmitIndexOf(string prefix) { writer.WriteLine($"int i = global::System.MemoryExtensions.IndexOf(inputSpan.Slice(pos), {Literal(prefix)});"); writer.WriteLine("if (i >= 0)"); writer.WriteLine("{"); writer.WriteLine(" base.runtextpos = pos + i;"); writer.WriteLine(" return true;"); writer.WriteLine("}"); } // Emits a search for a set at a fixed position from the start of the pattern, // and potentially other sets at other fixed positions in the pattern. void EmitFixedSet() { List<(char[]? Chars, string Set, int Distance, bool CaseInsensitive)>? sets = regexTree.FindOptimizations.FixedDistanceSets; (char[]? Chars, string Set, int Distance, bool CaseInsensitive) primarySet = sets![0]; const int MaxSets = 4; int setsToUse = Math.Min(sets.Count, MaxSets); // If we can use IndexOf{Any}, try to accelerate the skip loop via vectorization to match the first prefix. 
// We can use it if this is a case-sensitive class with a small number of characters in the class. int setIndex = 0; bool canUseIndexOf = !primarySet.CaseInsensitive && primarySet.Chars is not null; bool needLoop = !canUseIndexOf || setsToUse > 1; FinishEmitScope loopBlock = default; if (needLoop) { writer.WriteLine("global::System.ReadOnlySpan<char> span = inputSpan.Slice(pos);"); string upperBound = "span.Length" + (setsToUse > 1 || primarySet.Distance != 0 ? $" - {minRequiredLength - 1}" : ""); loopBlock = EmitBlock(writer, $"for (int i = 0; i < {upperBound}; i++)"); } if (canUseIndexOf) { string span = needLoop ? "span" : "inputSpan.Slice(pos)"; span = (needLoop, primarySet.Distance) switch { (false, 0) => span, (true, 0) => $"{span}.Slice(i)", (false, _) => $"{span}.Slice({primarySet.Distance})", (true, _) => $"{span}.Slice(i + {primarySet.Distance})", }; string indexOf = primarySet.Chars!.Length switch { 1 => $"global::System.MemoryExtensions.IndexOf({span}, {Literal(primarySet.Chars[0])})", 2 => $"global::System.MemoryExtensions.IndexOfAny({span}, {Literal(primarySet.Chars[0])}, {Literal(primarySet.Chars[1])})", 3 => $"global::System.MemoryExtensions.IndexOfAny({span}, {Literal(primarySet.Chars[0])}, {Literal(primarySet.Chars[1])}, {Literal(primarySet.Chars[2])})", _ => $"global::System.MemoryExtensions.IndexOfAny({span}, {Literal(new string(primarySet.Chars))})", }; if (needLoop) { writer.WriteLine($"int indexOfPos = {indexOf};"); using (EmitBlock(writer, "if (indexOfPos < 0)")) { Goto(NoStartingPositionFound); } writer.WriteLine("i += indexOfPos;"); writer.WriteLine(); if (setsToUse > 1) { using (EmitBlock(writer, $"if (i >= span.Length - {minRequiredLength - 1})")) { Goto(NoStartingPositionFound); } writer.WriteLine(); } } else { writer.WriteLine($"int i = {indexOf};"); using (EmitBlock(writer, "if (i >= 0)")) { writer.WriteLine("base.runtextpos = pos + i;"); writer.WriteLine("return true;"); } } setIndex = 1; } if (needLoop) { Debug.Assert(setIndex == 0 || setIndex == 1); bool hasCharClassConditions = false; if (setIndex < setsToUse) { // if (CharInClass(textSpan[i + charClassIndex], prefix[0], "...") && // ...) Debug.Assert(needLoop); int start = setIndex; for (; setIndex < setsToUse; setIndex++) { string spanIndex = $"span[i{(sets[setIndex].Distance > 0 ? $" + {sets[setIndex].Distance}" : "")}]"; string charInClassExpr = MatchCharacterClass(hasTextInfo, options, spanIndex, sets[setIndex].Set, sets[setIndex].CaseInsensitive, negate: false, additionalDeclarations, ref requiredHelpers); if (setIndex == start) { writer.Write($"if ({charInClassExpr}"); } else { writer.WriteLine(" &&"); writer.Write($" {charInClassExpr}"); } } writer.WriteLine(")"); hasCharClassConditions = true; } using (hasCharClassConditions ? EmitBlock(writer, null) : default) { writer.WriteLine("base.runtextpos = pos + i;"); writer.WriteLine("return true;"); } } loopBlock.Dispose(); } // Emits a search for a literal following a leading atomic single-character loop. void EmitLiteralAfterAtomicLoop() { Debug.Assert(regexTree.FindOptimizations.LiteralAfterLoop is not null); (RegexNode LoopNode, (char Char, string? String, char[]? 
Chars) Literal) target = regexTree.FindOptimizations.LiteralAfterLoop.Value; Debug.Assert(target.LoopNode.Kind is RegexNodeKind.Setloop or RegexNodeKind.Setlazy or RegexNodeKind.Setloopatomic); Debug.Assert(target.LoopNode.N == int.MaxValue); using (EmitBlock(writer, "while (true)")) { writer.WriteLine($"global::System.ReadOnlySpan<char> slice = inputSpan.Slice(pos);"); writer.WriteLine(); // Find the literal. If we can't find it, we're done searching. writer.Write("int i = global::System.MemoryExtensions."); writer.WriteLine( target.Literal.String is string literalString ? $"IndexOf(slice, {Literal(literalString)});" : target.Literal.Chars is not char[] literalChars ? $"IndexOf(slice, {Literal(target.Literal.Char)});" : literalChars.Length switch { 2 => $"IndexOfAny(slice, {Literal(literalChars[0])}, {Literal(literalChars[1])});", 3 => $"IndexOfAny(slice, {Literal(literalChars[0])}, {Literal(literalChars[1])}, {Literal(literalChars[2])});", _ => $"IndexOfAny(slice, {Literal(new string(literalChars))});", }); using (EmitBlock(writer, $"if (i < 0)")) { writer.WriteLine("break;"); } writer.WriteLine(); // We found the literal. Walk backwards from it finding as many matches as we can against the loop. writer.WriteLine("int prev = i;"); writer.WriteLine($"while ((uint)--prev < (uint)slice.Length && {MatchCharacterClass(hasTextInfo, options, "slice[prev]", target.LoopNode.Str!, caseInsensitive: false, negate: false, additionalDeclarations, ref requiredHelpers)});"); if (target.LoopNode.M > 0) { // If we found fewer than needed, loop around to try again. The loop doesn't overlap with the literal, // so we can start from after the last place the literal matched. writer.WriteLine($"if ((i - prev - 1) < {target.LoopNode.M})"); writer.WriteLine("{"); writer.WriteLine(" pos += i + 1;"); writer.WriteLine(" continue;"); writer.WriteLine("}"); } writer.WriteLine(); // We have a winner. The starting position is just after the last position that failed to match the loop. // TODO: It'd be nice to be able to communicate i as a place the matching engine can start matching // after the loop, so that it doesn't need to re-match the loop. writer.WriteLine("base.runtextpos = pos + prev + 1;"); writer.WriteLine("return true;"); } } // If a TextInfo is needed to perform ToLower operations, emits a local initialized to the TextInfo to use. static void EmitTextInfo(IndentedTextWriter writer, ref bool hasTextInfo, RegexMethod rm) { // Emit local to store current culture if needed if ((rm.Options & RegexOptions.CultureInvariant) == 0) { bool needsCulture = rm.Tree.FindOptimizations.FindMode switch { FindNextStartingPositionMode.FixedLiteral_LeftToRight_CaseInsensitive or FindNextStartingPositionMode.FixedSets_LeftToRight_CaseInsensitive or FindNextStartingPositionMode.LeadingSet_LeftToRight_CaseInsensitive => true, _ when rm.Tree.FindOptimizations.FixedDistanceSets is List<(char[]? 
Chars, string Set, int Distance, bool CaseInsensitive)> sets => sets.Exists(set => set.CaseInsensitive), _ => false, }; if (needsCulture) { hasTextInfo = true; writer.WriteLine("global::System.Globalization.TextInfo textInfo = global::System.Globalization.CultureInfo.CurrentCulture.TextInfo;"); } } } } /// <summary>Emits the body of the TryMatchAtCurrentPosition.</summary> private static RequiredHelperFunctions EmitTryMatchAtCurrentPosition(IndentedTextWriter writer, RegexMethod rm, string id, AnalysisResults analysis) { // In .NET Framework and up through .NET Core 3.1, the code generated for RegexOptions.Compiled was effectively an unrolled // version of what RegexInterpreter would process. The RegexNode tree would be turned into a series of opcodes via // RegexWriter; the interpreter would then sit in a loop processing those opcodes, and the RegexCompiler iterated through the // opcodes generating code for each equivalent to what the interpreter would do albeit with some decisions made at compile-time // rather than at run-time. This approach, however, led to complicated code that wasn't pay-for-play (e.g. a big backtracking // jump table that all compilations went through even if there was no backtracking), that didn't factor in the shape of the // tree (e.g. it's difficult to add optimizations based on interactions between nodes in the graph), and that didn't read well // when decompiled from IL to C# or when directly emitted as C# as part of a source generator. // // This implementation is instead based on directly walking the RegexNode tree and outputting code for each node in the graph. // A dedicated function for each kind of RegexNode emits the code necessary to handle that node's processing, including recursively // calling the relevant function for any of its children nodes. Backtracking is handled not via a giant jump table, but instead // by emitting direct jumps to each backtracking construct. This is achieved by having all match failures jump to a "done" // label that can be changed by a previous emitter, e.g. before EmitLoop returns, it ensures that "doneLabel" is set to the // label that code should jump back to when backtracking. That way, a subsequent EmitXx function doesn't need to know exactly // where to jump: it simply always jumps to "doneLabel" on match failure, and "doneLabel" is always configured to point to // the right location. In an expression without backtracking, or before any backtracking constructs have been encountered, // "doneLabel" is simply the final return location from the TryMatchAtCurrentPosition method that will undo any captures and exit, signaling to // the calling scan loop that nothing was matched. // Arbitrary limit for unrolling vs creating a loop. We want to balance size in the generated // code with other costs, like the (small) overhead of slicing to create the temp span to iterate. const int MaxUnrollSize = 16; RegexOptions options = (RegexOptions)rm.Options; RegexTree regexTree = rm.Tree; RequiredHelperFunctions requiredHelpers = RequiredHelperFunctions.None; // Helper to define names. Names start unadorned, but as soon as there's repetition, // they begin to have a numbered suffix. var usedNames = new Dictionary<string, int>(); // Every RegexTree is rooted in the implicit Capture for the whole expression. // Skip the Capture node. We handle the implicit root capture specially.
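// (Illustrative sketch, not lifted from any particular test: for a pattern like "ab|cd" the parsed tree is roughly
// Capture(0, Alternate(Multi("ab"), Multi("cd"))); the code below unwraps that outer implicit Capture(0) and then
// emits matching code for its single child.)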
RegexNode node = regexTree.Root; Debug.Assert(node.Kind == RegexNodeKind.Capture, "Every generated tree should begin with a capture node"); Debug.Assert(node.ChildCount() == 1, "Capture nodes should have one child"); node = node.Child(0); // In some limited cases, TryFindNextPossibleStartingPosition will only return true if it successfully matched the whole expression. // We can special case these to do essentially nothing in TryMatchAtCurrentPosition other than emit the capture. switch (node.Kind) { case RegexNodeKind.Multi or RegexNodeKind.Notone or RegexNodeKind.One or RegexNodeKind.Set when !IsCaseInsensitive(node): // This is the case for single and multiple characters, though the whole thing is only guaranteed // to have been validated in TryFindNextPossibleStartingPosition when doing case-sensitive comparison. writer.WriteLine($"int start = base.runtextpos;"); writer.WriteLine($"int end = start + {(node.Kind == RegexNodeKind.Multi ? node.Str!.Length : 1)};"); writer.WriteLine("base.Capture(0, start, end);"); writer.WriteLine("base.runtextpos = end;"); writer.WriteLine("return true;"); return requiredHelpers; case RegexNodeKind.Empty: // This case isn't common in production, but it's very common when first getting started with the // source generator and seeing what happens as you add more to expressions. When approaching // it from a learning perspective, this is very common, as it's the empty string you start with. writer.WriteLine("base.Capture(0, base.runtextpos, base.runtextpos);"); writer.WriteLine("return true;"); return requiredHelpers; } // In some cases, we need to emit declarations at the beginning of the method, but we only discover we need them later. // To handle that, we build up a collection of all the declarations to include, track where they should be inserted, // and then insert them at that position once everything else has been output. var additionalDeclarations = new HashSet<string>(); var additionalLocalFunctions = new Dictionary<string, string[]>(); // Declare some locals. string sliceSpan = "slice"; writer.WriteLine("int pos = base.runtextpos;"); writer.WriteLine($"int original_pos = pos;"); bool hasTimeout = EmitLoopTimeoutCounterIfNeeded(writer, rm); bool hasTextInfo = EmitInitializeCultureForTryMatchAtCurrentPositionIfNecessary(writer, rm, analysis); writer.Flush(); int additionalDeclarationsPosition = ((StringWriter)writer.InnerWriter).GetStringBuilder().Length; int additionalDeclarationsIndent = writer.Indent; // The implementation tries to use const indexes into the span wherever possible, which we can do // for all fixed-length constructs. In such cases (e.g. single chars, repeaters, strings, etc.) // we know at any point in the regex exactly how far into it we are, and we can use that to index // into the span created at the beginning of the routine to begin at exactly where we're starting // in the input. When we encounter a variable-length construct, we transfer the static value to // pos, slicing the inputSpan appropriately, and then zero out the static position. int sliceStaticPos = 0; SliceInputSpan(writer, defineLocal: true); writer.WriteLine(); // doneLabel starts out as the top-level label for the whole expression failing to match. However, // it may be changed by the processing of a node to point to wherever subsequent match failures // should jump to, in support of backtracking or other constructs.
For example, before emitting // the code for a branch N, an alternation will set the doneLabel to point to the label for // processing the next branch N+1: that way, any failures in the branch N's processing will // implicitly end up jumping to the right location without needing to know in what context it's used. string doneLabel = ReserveName("NoMatch"); string topLevelDoneLabel = doneLabel; // Check whether there are captures anywhere in the expression. If there aren't, we can skip all // the boilerplate logic around uncapturing, as there won't be anything to uncapture. bool expressionHasCaptures = analysis.MayContainCapture(node); // Emit the code for all nodes in the tree. EmitNode(node); // If we fall through to this place in the code, we've successfully matched the expression. writer.WriteLine(); writer.WriteLine("// The input matched."); if (sliceStaticPos > 0) { EmitAdd(writer, "pos", sliceStaticPos); // TransferSliceStaticPosToPos would also slice, which isn't needed here } writer.WriteLine("base.runtextpos = pos;"); writer.WriteLine("base.Capture(0, original_pos, pos);"); writer.WriteLine("return true;"); // We're done with the match. // Patch up any additional declarations. ReplaceAdditionalDeclarations(writer, additionalDeclarations, additionalDeclarationsPosition, additionalDeclarationsIndent); // And emit any required helpers. if (additionalLocalFunctions.Count != 0) { foreach (KeyValuePair<string, string[]> localFunctions in additionalLocalFunctions.OrderBy(k => k.Key)) { writer.WriteLine(); foreach (string line in localFunctions.Value) { writer.WriteLine(line); } } } return requiredHelpers; // Helper to create a name guaranteed to be unique within the function. string ReserveName(string prefix) { usedNames.TryGetValue(prefix, out int count); usedNames[prefix] = count + 1; return count == 0 ? prefix : $"{prefix}{count}"; } // Helper to emit a label. As of C# 10, labels aren't statements of their own and need to adorn a following statement; // if a label appears just before a closing brace, then, it's a compilation error. To avoid issues there, this by // default implements a blank statement (a semicolon) after each label, but individual uses can opt-out of the semicolon // when it's known the label will always be followed by a statement. void MarkLabel(string label, bool emitSemicolon = true) => writer.WriteLine($"{label}:{(emitSemicolon ? ";" : "")}"); // Emits a goto to jump to the specified label. However, if the specified label is the top-level done label indicating // that the entire match has failed, we instead emit our epilogue, uncapturing if necessary and returning out of TryMatchAtCurrentPosition. void Goto(string label) { if (label == topLevelDoneLabel) { // We only get here in the code if the whole expression fails to match and jumps to // the original value of doneLabel. if (expressionHasCaptures) { EmitUncaptureUntil("0"); } writer.WriteLine("return false; // The input didn't match."); } else { writer.WriteLine($"goto {label};"); } } // Emits a case or default line followed by an indented body. void CaseGoto(string clause, string label) { writer.WriteLine(clause); writer.Indent++; Goto(label); writer.Indent--; } // Whether the node has RegexOptions.IgnoreCase set. static bool IsCaseInsensitive(RegexNode node) => (node.Options & RegexOptions.IgnoreCase) != 0; // Slices the inputSpan starting at pos until end and stores it into slice.
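// (For reference, with defineLocal: true the helper below is expected to emit a line shaped roughly like
// "global::System.ReadOnlySpan<char> slice = inputSpan.Slice(pos);", and with defineLocal: false just the
// reassignment of the existing local; the actual local name comes from sliceSpan.)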
void SliceInputSpan(IndentedTextWriter writer, bool defineLocal = false) { if (defineLocal) { writer.Write("global::System.ReadOnlySpan<char> "); } writer.WriteLine($"{sliceSpan} = inputSpan.Slice(pos);"); } // Emits the sum of a constant and a value from a local. string Sum(int constant, string? local = null) => local is null ? constant.ToString(CultureInfo.InvariantCulture) : constant == 0 ? local : $"{constant} + {local}"; // Emits a check that the span is large enough at the currently known static position to handle the required additional length. void EmitSpanLengthCheck(int requiredLength, string? dynamicRequiredLength = null) { Debug.Assert(requiredLength > 0); using (EmitBlock(writer, $"if ({SpanLengthCheck(requiredLength, dynamicRequiredLength)})")) { Goto(doneLabel); } } // Returns a length check for the current span slice. The check returns true if // the span isn't long enough for the specified length. string SpanLengthCheck(int requiredLength, string? dynamicRequiredLength = null) => dynamicRequiredLength is null && sliceStaticPos + requiredLength == 1 ? $"{sliceSpan}.IsEmpty" : $"(uint){sliceSpan}.Length < {Sum(sliceStaticPos + requiredLength, dynamicRequiredLength)}"; // Adds the value of sliceStaticPos into the pos local, slices slice by the corresponding amount, // and zeros out sliceStaticPos. void TransferSliceStaticPosToPos() { if (sliceStaticPos > 0) { EmitAdd(writer, "pos", sliceStaticPos); writer.WriteLine($"{sliceSpan} = {sliceSpan}.Slice({sliceStaticPos});"); sliceStaticPos = 0; } } // Emits the code for an alternation. void EmitAlternation(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Alternate, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() >= 2, $"Expected at least 2 children, found {node.ChildCount()}"); int childCount = node.ChildCount(); Debug.Assert(childCount >= 2); string originalDoneLabel = doneLabel; // Both atomic and non-atomic are supported. While a parent RegexNode.Atomic node will itself // successfully prevent backtracking into this child node, we can emit better / cheaper code // for an Alternate when it is atomic, so we still take it into account here. Debug.Assert(node.Parent is not null); bool isAtomic = analysis.IsAtomicByAncestor(node); // If no child branch overlaps with another child branch, we can emit more streamlined code // that avoids checking unnecessary branches, e.g. with abc|def|ghi if the next character in // the input is 'a', we needn't try the def or ghi branches. A simple, relatively common case // of this is if every branch begins with a specific, unique character, in which case // the whole alternation can be treated as a simple switch, so we special-case that. However, // we can't goto _into_ switch cases, which means we can't use this approach if there's any // possibility of backtracking into the alternation. bool useSwitchedBranches = isAtomic; if (!useSwitchedBranches) { useSwitchedBranches = true; for (int i = 0; i < childCount; i++) { if (analysis.MayBacktrack(node.Child(i))) { useSwitchedBranches = false; break; } } } // Detect whether every branch begins with one or more unique characters. const int SetCharsSize = 5; // arbitrary limit (for IgnoreCase, we want this to be at least 3 to handle the vast majority of values) Span<char> setChars = stackalloc char[SetCharsSize]; if (useSwitchedBranches) { // Iterate through every branch, seeing if we can easily find a starting One, Multi, or small Set. 
// If we can, extract its starting char (or multiple in the case of a set), validate that all such // starting characters are unique relative to all the branches. var seenChars = new HashSet<char>(); for (int i = 0; i < childCount && useSwitchedBranches; i++) { // If it's not a One, Multi, or Set, we can't apply this optimization. // If it's IgnoreCase (and wasn't reduced to a non-IgnoreCase set), also ignore it to keep the logic simple. if (node.Child(i).FindBranchOneMultiOrSetStart() is not RegexNode oneMultiOrSet || (oneMultiOrSet.Options & RegexOptions.IgnoreCase) != 0) // TODO: https://github.com/dotnet/runtime/issues/61048 { useSwitchedBranches = false; break; } // If it's a One or a Multi, get the first character and add it to the set. // If it was already in the set, we can't apply this optimization. if (oneMultiOrSet.Kind is RegexNodeKind.One or RegexNodeKind.Multi) { if (!seenChars.Add(oneMultiOrSet.FirstCharOfOneOrMulti())) { useSwitchedBranches = false; break; } } else { // The branch begins with a set. Make sure it's a set of only a few characters // and get them. If we can't, we can't apply this optimization. Debug.Assert(oneMultiOrSet.Kind is RegexNodeKind.Set); int numChars; if (RegexCharClass.IsNegated(oneMultiOrSet.Str!) || (numChars = RegexCharClass.GetSetChars(oneMultiOrSet.Str!, setChars)) == 0) { useSwitchedBranches = false; break; } // Check to make sure each of the chars is unique relative to all other branches examined. foreach (char c in setChars.Slice(0, numChars)) { if (!seenChars.Add(c)) { useSwitchedBranches = false; break; } } } } } if (useSwitchedBranches) { // Note: This optimization does not exist with RegexOptions.Compiled. Here we rely on the // C# compiler to lower the C# switch statement with appropriate optimizations. In some // cases there are enough branches that the compiler will emit a jump table. In others // it'll optimize the order of checks in order to minimize the total number in the worst // case. In any case, we get easier to read and reason about C#. EmitSwitchedBranches(); } else { EmitAllBranches(); } return; // Emits the code for a switch-based alternation of non-overlapping branches. void EmitSwitchedBranches() { // We need at least 1 remaining character in the span, for the char to switch on. EmitSpanLengthCheck(1); writer.WriteLine(); // Emit a switch statement on the first char of each branch. using (EmitBlock(writer, $"switch ({sliceSpan}[{sliceStaticPos++}])")) { Span<char> setChars = stackalloc char[SetCharsSize]; // needs to be same size as detection check in caller int startingSliceStaticPos = sliceStaticPos; // Emit a case for each branch. for (int i = 0; i < childCount; i++) { sliceStaticPos = startingSliceStaticPos; RegexNode child = node.Child(i); Debug.Assert(child.Kind is RegexNodeKind.One or RegexNodeKind.Multi or RegexNodeKind.Set or RegexNodeKind.Concatenate, DescribeNode(child, analysis)); Debug.Assert(child.Kind is not RegexNodeKind.Concatenate || (child.ChildCount() >= 2 && child.Child(0).Kind is RegexNodeKind.One or RegexNodeKind.Multi or RegexNodeKind.Set)); RegexNode? 
childStart = child.FindBranchOneMultiOrSetStart(); Debug.Assert(childStart is not null, "Unexpectedly couldn't find the branch starting node."); Debug.Assert((childStart.Options & RegexOptions.IgnoreCase) == 0, "Expected only to find non-IgnoreCase branch starts"); if (childStart.Kind is RegexNodeKind.Set) { int numChars = RegexCharClass.GetSetChars(childStart.Str!, setChars); Debug.Assert(numChars != 0); writer.WriteLine($"case {string.Join(" or ", setChars.Slice(0, numChars).ToArray().Select(c => Literal(c)))}:"); } else { writer.WriteLine($"case {Literal(childStart.FirstCharOfOneOrMulti())}:"); } writer.Indent++; // Emit the code for the branch, without the first character that was already matched in the switch. switch (child.Kind) { case RegexNodeKind.Multi: EmitNode(CloneMultiWithoutFirstChar(child)); writer.WriteLine(); break; case RegexNodeKind.Concatenate: var newConcat = new RegexNode(RegexNodeKind.Concatenate, child.Options); if (childStart.Kind == RegexNodeKind.Multi) { newConcat.AddChild(CloneMultiWithoutFirstChar(childStart)); } int concatChildCount = child.ChildCount(); for (int j = 1; j < concatChildCount; j++) { newConcat.AddChild(child.Child(j)); } EmitNode(newConcat.Reduce()); writer.WriteLine(); break; static RegexNode CloneMultiWithoutFirstChar(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Multi); Debug.Assert(node.Str!.Length >= 2); return node.Str!.Length == 2 ? new RegexNode(RegexNodeKind.One, node.Options, node.Str![1]) : new RegexNode(RegexNodeKind.Multi, node.Options, node.Str!.Substring(1)); } } // This is only ever used for atomic alternations, so we can simply reset the doneLabel // after emitting the child, as nothing will backtrack here (and we need to reset it // so that all branches see the original). doneLabel = originalDoneLabel; // If we get here in the generated code, the branch completed successfully. // Before jumping to the end, we need to zero out sliceStaticPos, so that no // matter what the value is after the branch, whatever follows the alternate // will see the same sliceStaticPos. TransferSliceStaticPosToPos(); writer.WriteLine($"break;"); writer.WriteLine(); writer.Indent--; } // Default branch if the character didn't match the start of any branches. CaseGoto("default:", doneLabel); } } void EmitAllBranches() { // Label to jump to when any branch completes successfully. string matchLabel = ReserveName("AlternationMatch"); // Save off pos. We'll need to reset this each time a branch fails. string startingPos = ReserveName("alternation_starting_pos"); writer.WriteLine($"int {startingPos} = pos;"); int startingSliceStaticPos = sliceStaticPos; // We need to be able to undo captures in two situations: // - If a branch of the alternation itself contains captures, then if that branch // fails to match, any captures from that branch until that failure point need to // be uncaptured prior to jumping to the next branch. // - If the expression after the alternation contains captures, then failures // to match in those expressions could trigger backtracking back into the // alternation, and thus we need to uncapture any of them. // As such, if the alternation contains captures or if it's not atomic, we need // to grab the current crawl position so we can unwind back to it when necessary. // We can do all of the uncapturing as part of falling through to the next branch. // If we fail in a branch, then such uncapturing will unwind back to the position // at the start of the alternation.
If we fail after the alternation, and the // matched branch didn't contain any backtracking, then the failure will end up // jumping to the next branch, which will unwind the captures. And if we fail after // the alternation and the matched branch did contain backtracking, that backtracking // construct is responsible for unwinding back to its starting crawl position. If // it eventually ends up failing, that failure will result in jumping to the next branch // of the alternation, which will again dutifully unwind the remaining captures until // what they were at the start of the alternation. Of course, if there are no captures // anywhere in the regex, we don't have to do any of that. string? startingCapturePos = null; if (expressionHasCaptures && (analysis.MayContainCapture(node) || !isAtomic)) { startingCapturePos = ReserveName("alternation_starting_capturepos"); writer.WriteLine($"int {startingCapturePos} = base.Crawlpos();"); } writer.WriteLine(); // After executing the alternation, subsequent matching may fail, at which point execution // will need to backtrack to the alternation. We emit a branching table at the end of the // alternation, with a label that will be left as the "doneLabel" upon exiting emitting the // alternation. The branch table is populated with an entry for each branch of the alternation, // containing either the label for the last backtracking construct in the branch if such a construct // existed (in which case the doneLabel upon emitting that node will be different from before it) // or the label for the next branch. var labelMap = new string[childCount]; string backtrackLabel = ReserveName("AlternationBacktrack"); for (int i = 0; i < childCount; i++) { // If the alternation isn't atomic, backtracking may require our jump table jumping back // into these branches, so we can't use actual scopes, as that would hide the labels. using (EmitScope(writer, $"Branch {i}", faux: !isAtomic)) { bool isLastBranch = i == childCount - 1; string? nextBranch = null; if (!isLastBranch) { // Failure to match any branch other than the last one should result // in jumping to process the next branch. nextBranch = ReserveName("AlternationBranch"); doneLabel = nextBranch; } else { // Failure to match the last branch is equivalent to failing to match // the whole alternation, which means those failures should jump to // what "doneLabel" was defined as when starting the alternation. doneLabel = originalDoneLabel; } // Emit the code for each branch. EmitNode(node.Child(i)); writer.WriteLine(); // Add this branch to the backtracking table. At this point, either the child // had backtracking constructs, in which case doneLabel points to the last one // and that's where we'll want to jump to, or it doesn't, in which case doneLabel // still points to the nextBranch, which similarly is where we'll want to jump to. if (!isAtomic) { EmitStackPush(startingCapturePos is not null ? new[] { i.ToString(), startingPos, startingCapturePos } : new[] { i.ToString(), startingPos }); } labelMap[i] = doneLabel; // If we get here in the generated code, the branch completed successfully. // Before jumping to the end, we need to zero out sliceStaticPos, so that no // matter what the value is after the branch, whatever follows the alternate // will see the same sliceStaticPos. TransferSliceStaticPosToPos(); if (!isLastBranch || !isAtomic) { // If this isn't the last branch, we're about to output a reset section, // and if this isn't atomic, there will be a backtracking section before // the end of the method. 
In both of those cases, we've successfully // matched and need to skip over that code. If, however, this is the // last branch and this is an atomic alternation, we can just fall // through to the successfully matched location. Goto(matchLabel); } // Reset state for next branch and loop around to generate it. This includes // setting pos back to what it was at the beginning of the alternation, // updating slice to be the full length it was, and if there's a capture that // needs to be reset, uncapturing it. if (!isLastBranch) { writer.WriteLine(); MarkLabel(nextBranch!, emitSemicolon: false); writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); sliceStaticPos = startingSliceStaticPos; if (startingCapturePos is not null) { EmitUncaptureUntil(startingCapturePos); } } } writer.WriteLine(); } // We should never fall through to this location in the generated code. Either // a branch succeeded in matching and jumped to the end, or a branch failed in // matching and jumped to the next branch location. We only get to this code // if backtracking occurs and the code explicitly jumps here based on our setting // "doneLabel" to the label for this section. Thus, we only need to emit it if // something can backtrack to us, which can't happen if we're inside of an atomic // node. Thus, emit the backtracking section only if we're non-atomic. if (isAtomic) { doneLabel = originalDoneLabel; } else { doneLabel = backtrackLabel; MarkLabel(backtrackLabel, emitSemicolon: false); EmitStackPop(startingCapturePos is not null ? new[] { startingCapturePos, startingPos } : new[] { startingPos}); using (EmitBlock(writer, $"switch ({StackPop()})")) { for (int i = 0; i < labelMap.Length; i++) { CaseGoto($"case {i}:", labelMap[i]); } } writer.WriteLine(); } // Successfully completed the alternate. MarkLabel(matchLabel); Debug.Assert(sliceStaticPos == 0); } } // Emits the code to handle a backreference. void EmitBackreference(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Backreference, $"Unexpected type: {node.Kind}"); int capnum = RegexParser.MapCaptureNumber(node.M, rm.Tree.CaptureNumberSparseMapping); if (sliceStaticPos > 0) { TransferSliceStaticPosToPos(); writer.WriteLine(); } // If the specified capture hasn't yet captured anything, fail to match... except when using RegexOptions.ECMAScript, // in which case per ECMA 262 section 21.2.2.9 the backreference should succeed. if ((node.Options & RegexOptions.ECMAScript) != 0) { writer.WriteLine($"// If the {DescribeCapture(node.M, analysis)} hasn't matched, the backreference matches with RegexOptions.ECMAScript rules."); using (EmitBlock(writer, $"if (base.IsMatched({capnum}))")) { EmitWhenHasCapture(); } } else { writer.WriteLine($"// If the {DescribeCapture(node.M, analysis)} hasn't matched, the backreference doesn't match."); using (EmitBlock(writer, $"if (!base.IsMatched({capnum}))")) { Goto(doneLabel); } writer.WriteLine(); EmitWhenHasCapture(); } void EmitWhenHasCapture() { writer.WriteLine("// Get the captured text. If it doesn't match at the current position, the backreference doesn't match."); additionalDeclarations.Add("int matchLength = 0;"); writer.WriteLine($"matchLength = base.MatchLength({capnum});"); if (!IsCaseInsensitive(node)) { // If we're case-sensitive, we can simply validate that the remaining length of the slice is sufficient // to possibly match, and then do a SequenceEqual against the matched text. 
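// Rough sketch of the check emitted just below (capture number, span name, and label are illustrative):
//     if (slice.Length < matchLength ||
//         !global::System.MemoryExtensions.SequenceEqual(inputSpan.Slice(base.MatchIndex(1), matchLength), slice.Slice(0, matchLength)))
//     {
//         goto NoMatch; // or whatever the current backtracking label is
//     }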
writer.WriteLine($"if ({sliceSpan}.Length < matchLength || "); using (EmitBlock(writer, $" !global::System.MemoryExtensions.SequenceEqual(inputSpan.Slice(base.MatchIndex({capnum}), matchLength), {sliceSpan}.Slice(0, matchLength)))")) { Goto(doneLabel); } } else { // For case-insensitive, we have to walk each character individually. using (EmitBlock(writer, $"if ({sliceSpan}.Length < matchLength)")) { Goto(doneLabel); } writer.WriteLine(); additionalDeclarations.Add("int matchIndex = 0;"); writer.WriteLine($"matchIndex = base.MatchIndex({capnum});"); using (EmitBlock(writer, $"for (int i = 0; i < matchLength; i++)")) { using (EmitBlock(writer, $"if ({ToLower(hasTextInfo, options, $"inputSpan[matchIndex + i]")} != {ToLower(hasTextInfo, options, $"{sliceSpan}[i]")})")) { Goto(doneLabel); } } } writer.WriteLine(); writer.WriteLine($"pos += matchLength;"); SliceInputSpan(writer); } } // Emits the code for an if(backreference)-then-else conditional. void EmitBackreferenceConditional(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.BackreferenceConditional, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 2, $"Expected 2 children, found {node.ChildCount()}"); // We're branching in a complicated fashion. Make sure sliceStaticPos is 0. TransferSliceStaticPosToPos(); // Get the capture number to test. int capnum = RegexParser.MapCaptureNumber(node.M, rm.Tree.CaptureNumberSparseMapping); // Get the "yes" branch and the "no" branch. The "no" branch is optional in syntax and is thus // somewhat likely to be Empty. RegexNode yesBranch = node.Child(0); RegexNode? noBranch = node.Child(1) is { Kind: not RegexNodeKind.Empty } childNo ? childNo : null; string originalDoneLabel = doneLabel; // If the child branches might backtrack, we can't emit the branches inside constructs that // require braces, e.g. if/else, even though that would yield more idiomatic output. // But if we know for certain they won't backtrack, we can output the nicer code. if (analysis.IsAtomicByAncestor(node) || (!analysis.MayBacktrack(yesBranch) && (noBranch is null || !analysis.MayBacktrack(noBranch)))) { using (EmitBlock(writer, $"if (base.IsMatched({capnum}))")) { writer.WriteLine($"// The {DescribeCapture(node.M, analysis)} captured a value. Match the first branch."); EmitNode(yesBranch); writer.WriteLine(); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch } if (noBranch is not null) { using (EmitBlock(writer, $"else")) { writer.WriteLine($"// Otherwise, match the second branch."); EmitNode(noBranch); writer.WriteLine(); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch } } doneLabel = originalDoneLabel; // atomicity return; } string refNotMatched = ReserveName("ConditionalBackreferenceNotMatched"); string endConditional = ReserveName("ConditionalBackreferenceEnd"); // As with alternations, we have potentially multiple branches, each of which may contain // backtracking constructs, but the expression after the conditional needs a single target // to backtrack to. So, we expose a single Backtrack label and track which branch was // followed in this resumeAt local. string resumeAt = ReserveName("conditionalbackreference_branch"); writer.WriteLine($"int {resumeAt} = 0;"); // While it would be nicely readable to use an if/else block, if the branches contain // anything that triggers backtracking, labels will end up being defined, and if they're // inside the scope block for the if or else, that will prevent jumping to them from // elsewhere. 
So we implement the if/else with labels and gotos manually. // Check to see if the specified capture number was captured. using (EmitBlock(writer, $"if (!base.IsMatched({capnum}))")) { Goto(refNotMatched); } writer.WriteLine(); // The specified capture was captured. Run the "yes" branch. // If it successfully matches, jump to the end. EmitNode(yesBranch); writer.WriteLine(); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch string postYesDoneLabel = doneLabel; if (postYesDoneLabel != originalDoneLabel) { writer.WriteLine($"{resumeAt} = 0;"); } bool needsEndConditional = postYesDoneLabel != originalDoneLabel || noBranch is not null; if (needsEndConditional) { Goto(endConditional); writer.WriteLine(); } MarkLabel(refNotMatched); string postNoDoneLabel = originalDoneLabel; if (noBranch is not null) { // Output the no branch. doneLabel = originalDoneLabel; EmitNode(noBranch); writer.WriteLine(); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch postNoDoneLabel = doneLabel; if (postNoDoneLabel != originalDoneLabel) { writer.WriteLine($"{resumeAt} = 1;"); } } else { // There's only a yes branch. If it's going to cause us to output a backtracking // label but code may not end up taking the yes branch path, we need to emit a resumeAt // that will cause the backtracking to immediately pass through this node. if (postYesDoneLabel != originalDoneLabel) { writer.WriteLine($"{resumeAt} = 2;"); } } // If either the yes branch or the no branch contained backtracking, subsequent expressions // might try to backtrack to here, so output a backtracking map based on resumeAt. bool hasBacktracking = postYesDoneLabel != originalDoneLabel || postNoDoneLabel != originalDoneLabel; if (hasBacktracking) { // Skip the backtracking section. Goto(endConditional); writer.WriteLine(); // Backtrack section string backtrack = ReserveName("ConditionalBackreferenceBacktrack"); doneLabel = backtrack; MarkLabel(backtrack); // Pop from the stack the branch that was used and jump back to its backtracking location. EmitStackPop(resumeAt); using (EmitBlock(writer, $"switch ({resumeAt})")) { if (postYesDoneLabel != originalDoneLabel) { CaseGoto("case 0:", postYesDoneLabel); } if (postNoDoneLabel != originalDoneLabel) { CaseGoto("case 1:", postNoDoneLabel); } CaseGoto("default:", originalDoneLabel); } } if (needsEndConditional) { MarkLabel(endConditional); } if (hasBacktracking) { // We're not atomic and at least one of the yes or no branches contained backtracking constructs, // so finish outputting our backtracking logic, which involves pushing onto the stack which // branch to backtrack into. EmitStackPush(resumeAt); } } // Emits the code for an if(expression)-then-else conditional. void EmitExpressionConditional(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.ExpressionConditional, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 3, $"Expected 3 children, found {node.ChildCount()}"); bool isAtomic = analysis.IsAtomicByAncestor(node); // We're branching in a complicated fashion. Make sure sliceStaticPos is 0. TransferSliceStaticPosToPos(); // The first child node is the condition expression. If this matches, then we branch to the "yes" branch. // If it doesn't match, then we branch to the optional "no" branch if it exists, or simply skip the "yes" // branch, otherwise. The condition is treated as a positive lookahead. RegexNode condition = node.Child(0); // Get the "yes" branch and the "no" branch. 
The "no" branch is optional in syntax and is thus // somewhat likely to be Empty. RegexNode yesBranch = node.Child(1); RegexNode? noBranch = node.Child(2) is { Kind: not RegexNodeKind.Empty } childNo ? childNo : null; string originalDoneLabel = doneLabel; string expressionNotMatched = ReserveName("ConditionalExpressionNotMatched"); string endConditional = ReserveName("ConditionalExpressionEnd"); // As with alternations, we have potentially multiple branches, each of which may contain // backtracking constructs, but the expression after the condition needs a single target // to backtrack to. So, we expose a single Backtrack label and track which branch was // followed in this resumeAt local. string resumeAt = ReserveName("conditionalexpression_branch"); if (!isAtomic) { writer.WriteLine($"int {resumeAt} = 0;"); } // If the condition expression has captures, we'll need to uncapture them in the case of no match. string? startingCapturePos = null; if (analysis.MayContainCapture(condition)) { startingCapturePos = ReserveName("conditionalexpression_starting_capturepos"); writer.WriteLine($"int {startingCapturePos} = base.Crawlpos();"); } // Emit the condition expression. Route any failures to after the yes branch. This code is almost // the same as for a positive lookahead; however, a positive lookahead only needs to reset the position // on a successful match, as a failed match fails the whole expression; here, we need to reset the // position on completion, regardless of whether the match is successful or not. doneLabel = expressionNotMatched; // Save off pos. We'll need to reset this upon successful completion of the lookahead. string startingPos = ReserveName("conditionalexpression_starting_pos"); writer.WriteLine($"int {startingPos} = pos;"); writer.WriteLine(); int startingSliceStaticPos = sliceStaticPos; // Emit the child. The condition expression is a zero-width assertion, which is atomic, // so prevent backtracking into it. writer.WriteLine("// Condition:"); EmitNode(condition); writer.WriteLine(); doneLabel = originalDoneLabel; // After the condition completes successfully, reset the text positions. // Do not reset captures, which persist beyond the lookahead. writer.WriteLine("// Condition matched:"); writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); sliceStaticPos = startingSliceStaticPos; writer.WriteLine(); // The expression matched. Run the "yes" branch. If it successfully matches, jump to the end. EmitNode(yesBranch); writer.WriteLine(); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch string postYesDoneLabel = doneLabel; if (!isAtomic && postYesDoneLabel != originalDoneLabel) { writer.WriteLine($"{resumeAt} = 0;"); } Goto(endConditional); writer.WriteLine(); // After the condition completes unsuccessfully, reset the text positions // _and_ reset captures, which should not persist when the whole expression failed. writer.WriteLine("// Condition did not match:"); MarkLabel(expressionNotMatched, emitSemicolon: false); writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); sliceStaticPos = startingSliceStaticPos; if (startingCapturePos is not null) { EmitUncaptureUntil(startingCapturePos); } writer.WriteLine(); string postNoDoneLabel = originalDoneLabel; if (noBranch is not null) { // Output the no branch. 
doneLabel = originalDoneLabel; EmitNode(noBranch); writer.WriteLine(); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch postNoDoneLabel = doneLabel; if (!isAtomic && postNoDoneLabel != originalDoneLabel) { writer.WriteLine($"{resumeAt} = 1;"); } } else { // There's only a yes branch. If it's going to cause us to output a backtracking // label but code may not end up taking the yes branch path, we need to emit a resumeAt // that will cause the backtracking to immediately pass through this node. if (!isAtomic && postYesDoneLabel != originalDoneLabel) { writer.WriteLine($"{resumeAt} = 2;"); } } // If either the yes branch or the no branch contained backtracking, subsequent expressions // might try to backtrack to here, so output a backtracking map based on resumeAt. if (isAtomic || (postYesDoneLabel == originalDoneLabel && postNoDoneLabel == originalDoneLabel)) { doneLabel = originalDoneLabel; MarkLabel(endConditional); } else { // Skip the backtracking section. Goto(endConditional); writer.WriteLine(); string backtrack = ReserveName("ConditionalExpressionBacktrack"); doneLabel = backtrack; MarkLabel(backtrack, emitSemicolon: false); EmitStackPop(resumeAt); using (EmitBlock(writer, $"switch ({resumeAt})")) { if (postYesDoneLabel != originalDoneLabel) { CaseGoto("case 0:", postYesDoneLabel); } if (postNoDoneLabel != originalDoneLabel) { CaseGoto("case 1:", postNoDoneLabel); } CaseGoto("default:", originalDoneLabel); } MarkLabel(endConditional, emitSemicolon: false); EmitStackPush(resumeAt); } } // Emits the code for a Capture node. void EmitCapture(RegexNode node, RegexNode? subsequent = null) { Debug.Assert(node.Kind is RegexNodeKind.Capture, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); int capnum = RegexParser.MapCaptureNumber(node.M, rm.Tree.CaptureNumberSparseMapping); int uncapnum = RegexParser.MapCaptureNumber(node.N, rm.Tree.CaptureNumberSparseMapping); bool isAtomic = analysis.IsAtomicByAncestor(node); TransferSliceStaticPosToPos(); string startingPos = ReserveName("capture_starting_pos"); writer.WriteLine($"int {startingPos} = pos;"); writer.WriteLine(); RegexNode child = node.Child(0); if (uncapnum != -1) { using (EmitBlock(writer, $"if (!base.IsMatched({uncapnum}))")) { Goto(doneLabel); } writer.WriteLine(); } // Emit child node. string originalDoneLabel = doneLabel; EmitNode(child, subsequent); bool childBacktracks = doneLabel != originalDoneLabel; writer.WriteLine(); TransferSliceStaticPosToPos(); if (uncapnum == -1) { writer.WriteLine($"base.Capture({capnum}, {startingPos}, pos);"); } else { writer.WriteLine($"base.TransferCapture({capnum}, {uncapnum}, {startingPos}, pos);"); } if (isAtomic || !childBacktracks) { // If the capture is atomic and nothing can backtrack into it, we're done. // Similarly, even if the capture isn't atomic, if the captured expression // doesn't do any backtracking, we're done. doneLabel = originalDoneLabel; } else { // We're not atomic and the child node backtracks. When it does, we need // to ensure that the starting position for the capture is appropriately // reset to what it was initially (it could have changed as part of being // in a loop or similar). So, we emit a backtracking section that // pushes/pops the starting position before falling through. 
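// (Sketch of the emitted shape, with illustrative names: push capture_starting_pos onto the backtracking stack and
// jump over a CaptureBacktrack: section; that section pops capture_starting_pos, restores pos when the child itself
// doesn't backtrack, and jumps to the previous doneLabel; successful matches fall through at the SkipBacktrack: label.)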
writer.WriteLine(); EmitStackPush(startingPos); // Skip past the backtracking section string end = ReserveName("SkipBacktrack"); Goto(end); writer.WriteLine(); // Emit a backtracking section that restores the capture's state and then jumps to the previous done label string backtrack = ReserveName($"CaptureBacktrack"); MarkLabel(backtrack, emitSemicolon: false); EmitStackPop(startingPos); if (!childBacktracks) { writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); } Goto(doneLabel); writer.WriteLine(); doneLabel = backtrack; MarkLabel(end); } } // Emits the code to handle a positive lookahead assertion. void EmitPositiveLookaheadAssertion(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.PositiveLookaround, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); // Save off pos. We'll need to reset this upon successful completion of the lookahead. string startingPos = ReserveName("positivelookahead_starting_pos"); writer.WriteLine($"int {startingPos} = pos;"); writer.WriteLine(); int startingSliceStaticPos = sliceStaticPos; // Emit the child. RegexNode child = node.Child(0); if (analysis.MayBacktrack(child)) { // Lookarounds are implicitly atomic, so we need to emit the node as atomic if it might backtrack. EmitAtomic(node, null); } else { EmitNode(child); } // After the child completes successfully, reset the text positions. // Do not reset captures, which persist beyond the lookahead. writer.WriteLine(); writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); sliceStaticPos = startingSliceStaticPos; } // Emits the code to handle a negative lookahead assertion. void EmitNegativeLookaheadAssertion(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.NegativeLookaround, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); string originalDoneLabel = doneLabel; // Save off pos. We'll need to reset this upon successful completion of the lookahead. string startingPos = ReserveName("negativelookahead_starting_pos"); writer.WriteLine($"int {startingPos} = pos;"); int startingSliceStaticPos = sliceStaticPos; string negativeLookaheadDoneLabel = ReserveName("NegativeLookaheadMatch"); doneLabel = negativeLookaheadDoneLabel; // Emit the child. RegexNode child = node.Child(0); if (analysis.MayBacktrack(child)) { // Lookarounds are implicitly atomic, so we need to emit the node as atomic if it might backtrack. EmitAtomic(node, null); } else { EmitNode(child); } // If the generated code ends up here, it matched the lookahead, which actually // means failure for a _negative_ lookahead, so we need to jump to the original done. writer.WriteLine(); Goto(originalDoneLabel); writer.WriteLine(); // Failures (success for a negative lookahead) jump here. MarkLabel(negativeLookaheadDoneLabel, emitSemicolon: false); // After the child completes in failure (success for negative lookahead), reset the text positions. writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); sliceStaticPos = startingSliceStaticPos; doneLabel = originalDoneLabel; } // Emits the code for the node. void EmitNode(RegexNode node, RegexNode? subsequent = null, bool emitLengthChecksIfRequired = true) { if (!StackHelper.TryEnsureSufficientExecutionStack()) { StackHelper.CallOnEmptyStack(EmitNode, node, subsequent, emitLengthChecksIfRequired); return; } // Separate out several node types that, for conciseness, don't need a header and scope written into the source. 
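// (For example, an Empty node emits nothing at all and a Concatenate simply emits its children in order; most other
// node kinds below are wrapped in a scope headed by a descriptive comment via EmitScope.)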
switch (node.Kind) { // Nothing is written for an empty case RegexNodeKind.Empty: return; // A match failure doesn't need a scope. case RegexNodeKind.Nothing: Goto(doneLabel); return; // Skip atomic nodes that wrap non-backtracking children; in such a case there's nothing to be made atomic. case RegexNodeKind.Atomic when !analysis.MayBacktrack(node.Child(0)): EmitNode(node.Child(0)); return; // Concatenate is a simplification in the node tree so that a series of children can be represented as one. // We don't need its presence visible in the source. case RegexNodeKind.Concatenate: EmitConcatenation(node, subsequent, emitLengthChecksIfRequired); return; } // Put the node's code into its own scope. If the node contains labels that may need to // be visible outside of its scope, the scope is still emitted for clarity but is commented out. using (EmitScope(writer, DescribeNode(node, analysis), faux: analysis.MayBacktrack(node))) { switch (node.Kind) { case RegexNodeKind.Beginning: case RegexNodeKind.Start: case RegexNodeKind.Bol: case RegexNodeKind.Eol: case RegexNodeKind.End: case RegexNodeKind.EndZ: EmitAnchors(node); break; case RegexNodeKind.Boundary: case RegexNodeKind.NonBoundary: case RegexNodeKind.ECMABoundary: case RegexNodeKind.NonECMABoundary: EmitBoundary(node); break; case RegexNodeKind.Multi: EmitMultiChar(node, emitLengthChecksIfRequired); break; case RegexNodeKind.One: case RegexNodeKind.Notone: case RegexNodeKind.Set: EmitSingleChar(node, emitLengthChecksIfRequired); break; case RegexNodeKind.Oneloop: case RegexNodeKind.Notoneloop: case RegexNodeKind.Setloop: EmitSingleCharLoop(node, subsequent, emitLengthChecksIfRequired); break; case RegexNodeKind.Onelazy: case RegexNodeKind.Notonelazy: case RegexNodeKind.Setlazy: EmitSingleCharLazy(node, subsequent, emitLengthChecksIfRequired); break; case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Setloopatomic: EmitSingleCharAtomicLoop(node, emitLengthChecksIfRequired); break; case RegexNodeKind.Loop: EmitLoop(node); break; case RegexNodeKind.Lazyloop: EmitLazy(node); break; case RegexNodeKind.Alternate: EmitAlternation(node); break; case RegexNodeKind.Backreference: EmitBackreference(node); break; case RegexNodeKind.BackreferenceConditional: EmitBackreferenceConditional(node); break; case RegexNodeKind.ExpressionConditional: EmitExpressionConditional(node); break; case RegexNodeKind.Atomic when analysis.MayBacktrack(node.Child(0)): EmitAtomic(node, subsequent); return; case RegexNodeKind.Capture: EmitCapture(node, subsequent); break; case RegexNodeKind.PositiveLookaround: EmitPositiveLookaheadAssertion(node); break; case RegexNodeKind.NegativeLookaround: EmitNegativeLookaheadAssertion(node); break; case RegexNodeKind.UpdateBumpalong: EmitUpdateBumpalong(node); break; default: Debug.Fail($"Unexpected node type: {node.Kind}"); break; } } } // Emits the node for an atomic. void EmitAtomic(RegexNode node, RegexNode? subsequent) { Debug.Assert(node.Kind is RegexNodeKind.Atomic or RegexNodeKind.PositiveLookaround or RegexNodeKind.NegativeLookaround, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); Debug.Assert(analysis.MayBacktrack(node.Child(0)), "Expected child to potentially backtrack"); // Grab the current done label and the current backtracking position. 
The purpose of the atomic node // is to ensure that nodes after it that might backtrack skip over the atomic, which means after // rendering the atomic's child, we need to reset the label so that subsequent backtracking doesn't // see any label left set by the atomic's child. We also need to reset the backtracking stack position // so that the state on the stack remains consistent. string originalDoneLabel = doneLabel; additionalDeclarations.Add("int stackpos = 0;"); string startingStackpos = ReserveName("atomic_stackpos"); writer.WriteLine($"int {startingStackpos} = stackpos;"); writer.WriteLine(); // Emit the child. EmitNode(node.Child(0), subsequent); writer.WriteLine(); // Reset the stack position and done label. writer.WriteLine($"stackpos = {startingStackpos};"); doneLabel = originalDoneLabel; } // Emits the code to handle updating base.runtextpos to pos in response to // an UpdateBumpalong node. This is used when we want to inform the scan loop that // it should bump from this location rather than from the original location. void EmitUpdateBumpalong(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.UpdateBumpalong, $"Unexpected type: {node.Kind}"); TransferSliceStaticPosToPos(); using (EmitBlock(writer, "if (base.runtextpos < pos)")) { writer.WriteLine("base.runtextpos = pos;"); } } // Emits code for a concatenation void EmitConcatenation(RegexNode node, RegexNode? subsequent, bool emitLengthChecksIfRequired) { Debug.Assert(node.Kind is RegexNodeKind.Concatenate, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() >= 2, $"Expected at least 2 children, found {node.ChildCount()}"); // Emit the code for each child one after the other. string? prevDescription = null; int childCount = node.ChildCount(); for (int i = 0; i < childCount; i++) { // If we can find a subsequence of fixed-length children, we can emit a length check once for that sequence // and then skip the individual length checks for each. We also want to minimize the repetition of if blocks, // and so we try to emit a series of clauses all part of the same if block rather than one if block per child. if (emitLengthChecksIfRequired && node.TryGetJoinableLengthCheckChildRange(i, out int requiredLength, out int exclusiveEnd)) { bool wroteClauses = true; writer.Write($"if ({SpanLengthCheck(requiredLength)}"); while (i < exclusiveEnd) { for (; i < exclusiveEnd; i++) { void WriteSingleCharChild(RegexNode child, bool includeDescription = true) { if (wroteClauses) { writer.WriteLine(prevDescription is not null ? $" || // {prevDescription}" : " ||"); writer.Write(" "); } else { writer.Write("if ("); } EmitSingleChar(child, emitLengthCheck: false, clauseOnly: true); prevDescription = includeDescription ? DescribeNode(child, analysis) : null; wroteClauses = true; } RegexNode child = node.Child(i); if (child.Kind is RegexNodeKind.One or RegexNodeKind.Notone or RegexNodeKind.Set) { WriteSingleCharChild(child); } else if (child.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Onelazy or RegexNodeKind.Oneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setlazy or RegexNodeKind.Setloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notonelazy or RegexNodeKind.Notoneloopatomic && child.M == child.N && child.M <= MaxUnrollSize) { for (int c = 0; c < child.M; c++) { WriteSingleCharChild(child, includeDescription: c == 0); } } else { break; } } if (wroteClauses) { writer.WriteLine(prevDescription is not null ? 
$") // {prevDescription}" : ")"); using (EmitBlock(writer, null)) { Goto(doneLabel); } if (i < childCount) { writer.WriteLine(); } wroteClauses = false; prevDescription = null; } if (i < exclusiveEnd) { EmitNode(node.Child(i), GetSubsequentOrDefault(i, node, subsequent), emitLengthChecksIfRequired: false); if (i < childCount - 1) { writer.WriteLine(); } i++; } } i--; continue; } EmitNode(node.Child(i), GetSubsequentOrDefault(i, node, subsequent), emitLengthChecksIfRequired: emitLengthChecksIfRequired); if (i < childCount - 1) { writer.WriteLine(); } } // Gets the node to treat as the subsequent one to node.Child(index) static RegexNode? GetSubsequentOrDefault(int index, RegexNode node, RegexNode? defaultNode) { int childCount = node.ChildCount(); for (int i = index + 1; i < childCount; i++) { RegexNode next = node.Child(i); if (next.Kind is not RegexNodeKind.UpdateBumpalong) // skip node types that don't have a semantic impact { return next; } } return defaultNode; } } // Emits the code to handle a single-character match. void EmitSingleChar(RegexNode node, bool emitLengthCheck = true, string? offset = null, bool clauseOnly = false) { Debug.Assert(node.IsOneFamily || node.IsNotoneFamily || node.IsSetFamily, $"Unexpected type: {node.Kind}"); // This only emits a single check, but it's called from the looping constructs in a loop // to generate the code for a single check, so we map those looping constructs to the // appropriate single check. string expr = $"{sliceSpan}[{Sum(sliceStaticPos, offset)}]"; if (node.IsSetFamily) { expr = $"{MatchCharacterClass(hasTextInfo, options, expr, node.Str!, IsCaseInsensitive(node), negate: true, additionalDeclarations, ref requiredHelpers)}"; } else { expr = ToLowerIfNeeded(hasTextInfo, options, expr, IsCaseInsensitive(node)); expr = $"{expr} {(node.IsOneFamily ? "!=" : "==")} {Literal(node.Ch)}"; } if (clauseOnly) { writer.Write(expr); } else { using (EmitBlock(writer, emitLengthCheck ? $"if ({SpanLengthCheck(1, offset)} || {expr})" : $"if ({expr})")) { Goto(doneLabel); } } sliceStaticPos++; } // Emits the code to handle a boundary check on a character. void EmitBoundary(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Boundary or RegexNodeKind.NonBoundary or RegexNodeKind.ECMABoundary or RegexNodeKind.NonECMABoundary, $"Unexpected type: {node.Kind}"); string call = node.Kind switch { RegexNodeKind.Boundary => "!IsBoundary", RegexNodeKind.NonBoundary => "IsBoundary", RegexNodeKind.ECMABoundary => "!IsECMABoundary", _ => "IsECMABoundary", }; RequiredHelperFunctions boundaryFunctionRequired = node.Kind switch { RegexNodeKind.Boundary or RegexNodeKind.NonBoundary => RequiredHelperFunctions.IsBoundary | RequiredHelperFunctions.IsWordChar, // IsBoundary internally uses IsWordChar _ => RequiredHelperFunctions.IsECMABoundary }; requiredHelpers |= boundaryFunctionRequired; using (EmitBlock(writer, $"if ({call}(inputSpan, pos{(sliceStaticPos > 0 ? $" + {sliceStaticPos}" : "")}))")) { Goto(doneLabel); } } // Emits the code to handle various anchors. 
void EmitAnchors(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Beginning or RegexNodeKind.Start or RegexNodeKind.Bol or RegexNodeKind.End or RegexNodeKind.EndZ or RegexNodeKind.Eol, $"Unexpected type: {node.Kind}"); Debug.Assert(sliceStaticPos >= 0); switch (node.Kind) { case RegexNodeKind.Beginning: case RegexNodeKind.Start: if (sliceStaticPos > 0) { // If we statically know we've already matched part of the regex, there's no way we're at the // beginning or start, as we've already progressed past it. Goto(doneLabel); } else { using (EmitBlock(writer, node.Kind == RegexNodeKind.Beginning ? "if (pos != 0)" : "if (pos != base.runtextstart)")) { Goto(doneLabel); } } break; case RegexNodeKind.Bol: if (sliceStaticPos > 0) { using (EmitBlock(writer, $"if ({sliceSpan}[{sliceStaticPos - 1}] != '\\n')")) { Goto(doneLabel); } } else { // We can't use our slice in this case, because we'd need to access slice[-1], so we access the inputSpan field directly: using (EmitBlock(writer, $"if (pos > 0 && inputSpan[pos - 1] != '\\n')")) { Goto(doneLabel); } } break; case RegexNodeKind.End: using (EmitBlock(writer, $"if ({IsSliceLengthGreaterThanSliceStaticPos()})")) { Goto(doneLabel); } break; case RegexNodeKind.EndZ: writer.WriteLine($"if ({sliceSpan}.Length > {sliceStaticPos + 1} || ({IsSliceLengthGreaterThanSliceStaticPos()} && {sliceSpan}[{sliceStaticPos}] != '\\n'))"); using (EmitBlock(writer, null)) { Goto(doneLabel); } break; case RegexNodeKind.Eol: using (EmitBlock(writer, $"if ({IsSliceLengthGreaterThanSliceStaticPos()} && {sliceSpan}[{sliceStaticPos}] != '\\n')")) { Goto(doneLabel); } break; string IsSliceLengthGreaterThanSliceStaticPos() => sliceStaticPos == 0 ? $"!{sliceSpan}.IsEmpty" : $"{sliceSpan}.Length > {sliceStaticPos}"; } } // Emits the code to handle a multiple-character match. void EmitMultiChar(RegexNode node, bool emitLengthCheck) { Debug.Assert(node.Kind is RegexNodeKind.Multi, $"Unexpected type: {node.Kind}"); Debug.Assert(node.Str is not null); EmitMultiCharString(node.Str, IsCaseInsensitive(node), emitLengthCheck); } void EmitMultiCharString(string str, bool caseInsensitive, bool emitLengthCheck) { Debug.Assert(str.Length >= 2); if (caseInsensitive) // StartsWith(..., XxIgnoreCase) won't necessarily be the same as char-by-char comparison { // This case should be relatively rare. It will only occur with IgnoreCase and a series of non-ASCII characters. if (emitLengthCheck) { EmitSpanLengthCheck(str.Length); } using (EmitBlock(writer, $"for (int i = 0; i < {Literal(str)}.Length; i++)")) { string textSpanIndex = sliceStaticPos > 0 ? $"i + {sliceStaticPos}" : "i"; using (EmitBlock(writer, $"if ({ToLower(hasTextInfo, options, $"{sliceSpan}[{textSpanIndex}]")} != {Literal(str)}[i])")) { Goto(doneLabel); } } } else { string sourceSpan = sliceStaticPos > 0 ? $"{sliceSpan}.Slice({sliceStaticPos})" : sliceSpan; using (EmitBlock(writer, $"if (!global::System.MemoryExtensions.StartsWith({sourceSpan}, {Literal(str)}))")) { Goto(doneLabel); } } sliceStaticPos += str.Length; } void EmitSingleCharLoop(RegexNode node, RegexNode? subsequent = null, bool emitLengthChecksIfRequired = true) { Debug.Assert(node.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop, $"Unexpected type: {node.Kind}"); // If this is actually a repeater, emit that instead; no backtracking necessary. if (node.M == node.N) { EmitSingleCharRepeater(node, emitLengthChecksIfRequired); return; } // Emit backtracking around an atomic single char loop. 
We can then implement the backtracking // as an afterthought, since we know exactly how many characters are accepted by each iteration // of the wrapped loop (1) and that there's nothing captured by the loop. Debug.Assert(node.M < node.N); string backtrackingLabel = ReserveName("CharLoopBacktrack"); string endLoop = ReserveName("CharLoopEnd"); string startingPos = ReserveName("charloop_starting_pos"); string endingPos = ReserveName("charloop_ending_pos"); additionalDeclarations.Add($"int {startingPos} = 0, {endingPos} = 0;"); // We're about to enter a loop, so ensure our text position is 0. TransferSliceStaticPosToPos(); // Grab the current position, then emit the loop as atomic, and then // grab the current position again. Even though we emit the loop without // knowledge of backtracking, we can layer it on top by just walking back // through the individual characters (a benefit of the loop matching exactly // one character per iteration, no possible captures within the loop, etc.) writer.WriteLine($"{startingPos} = pos;"); writer.WriteLine(); EmitSingleCharAtomicLoop(node); writer.WriteLine(); TransferSliceStaticPosToPos(); writer.WriteLine($"{endingPos} = pos;"); EmitAdd(writer, startingPos, node.M); Goto(endLoop); writer.WriteLine(); // Backtracking section. Subsequent failures will jump to here, at which // point we decrement the matched count as long as it's above the minimum // required, and try again by flowing to everything that comes after this. MarkLabel(backtrackingLabel, emitSemicolon: false); if (expressionHasCaptures) { EmitUncaptureUntil(StackPop()); } EmitStackPop(endingPos, startingPos); writer.WriteLine(); if (subsequent?.FindStartingLiteral() is ValueTuple<char, string?, string?> literal) { writer.WriteLine($"if ({startingPos} >= {endingPos} ||"); using (EmitBlock(writer, literal.Item2 is not null ? $" ({endingPos} = global::System.MemoryExtensions.LastIndexOf(inputSpan.Slice({startingPos}, global::System.Math.Min(inputSpan.Length, {endingPos} + {literal.Item2.Length - 1}) - {startingPos}), {Literal(literal.Item2)})) < 0)" : literal.Item3 is null ? $" ({endingPos} = global::System.MemoryExtensions.LastIndexOf(inputSpan.Slice({startingPos}, {endingPos} - {startingPos}), {Literal(literal.Item1)})) < 0)" : literal.Item3.Length switch { 2 => $" ({endingPos} = global::System.MemoryExtensions.LastIndexOfAny(inputSpan.Slice({startingPos}, {endingPos} - {startingPos}), {Literal(literal.Item3[0])}, {Literal(literal.Item3[1])})) < 0)", 3 => $" ({endingPos} = global::System.MemoryExtensions.LastIndexOfAny(inputSpan.Slice({startingPos}, {endingPos} - {startingPos}), {Literal(literal.Item3[0])}, {Literal(literal.Item3[1])}, {Literal(literal.Item3[2])})) < 0)", _ => $" ({endingPos} = global::System.MemoryExtensions.LastIndexOfAny(inputSpan.Slice({startingPos}, {endingPos} - {startingPos}), {Literal(literal.Item3)})) < 0)", })) { Goto(doneLabel); } writer.WriteLine($"{endingPos} += {startingPos};"); writer.WriteLine($"pos = {endingPos};"); } else { using (EmitBlock(writer, $"if ({startingPos} >= {endingPos})")) { Goto(doneLabel); } writer.WriteLine($"pos = --{endingPos};"); } SliceInputSpan(writer); writer.WriteLine(); MarkLabel(endLoop, emitSemicolon: false); EmitStackPush(expressionHasCaptures ? new[] { startingPos, endingPos, "base.Crawlpos()" } : new[] { startingPos, endingPos }); doneLabel = backtrackingLabel; // leave set to the backtracking label for all subsequent nodes } void EmitSingleCharLazy(RegexNode node, RegexNode? 
subsequent = null, bool emitLengthChecksIfRequired = true) { Debug.Assert(node.Kind is RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy, $"Unexpected type: {node.Kind}"); // Emit the min iterations as a repeater. Any failures here don't necessitate backtracking, // as the lazy itself failed to match, and there's no backtracking possible by the individual // characters/iterations themselves. if (node.M > 0) { EmitSingleCharRepeater(node, emitLengthChecksIfRequired); } // If the whole thing was actually that repeater, we're done. Similarly, if this is actually an atomic // lazy loop, nothing will ever backtrack into this node, so we never need to iterate more than the minimum. if (node.M == node.N || analysis.IsAtomicByAncestor(node)) { return; } if (node.M > 0) { // We emitted a repeater to handle the required iterations; add a newline after it. writer.WriteLine(); } Debug.Assert(node.M < node.N); // We now need to match one character at a time, each time allowing the remainder of the expression // to try to match, and only matching another character if the subsequent expression fails to match. // We're about to enter a loop, so ensure our text position is 0. TransferSliceStaticPosToPos(); // If the loop isn't unbounded, track the number of iterations and the max number to allow. string? iterationCount = null; string? maxIterations = null; if (node.N != int.MaxValue) { maxIterations = $"{node.N - node.M}"; iterationCount = ReserveName("lazyloop_iteration"); writer.WriteLine($"int {iterationCount} = 0;"); } // Track the current crawl position. Upon backtracking, we'll unwind any captures beyond this point. string? capturePos = null; if (expressionHasCaptures) { capturePos = ReserveName("lazyloop_capturepos"); additionalDeclarations.Add($"int {capturePos} = 0;"); } // Track the current pos. Each time we backtrack, we'll reset to the stored position, which // is also incremented each time we match another character in the loop. string startingPos = ReserveName("lazyloop_pos"); additionalDeclarations.Add($"int {startingPos} = 0;"); writer.WriteLine($"{startingPos} = pos;"); // Skip the backtracking section for the initial subsequent matching. We've already matched the // minimum number of iterations, which means we can successfully match with zero additional iterations. string endLoopLabel = ReserveName("LazyLoopEnd"); Goto(endLoopLabel); writer.WriteLine(); // Backtracking section. Subsequent failures will jump to here. string backtrackingLabel = ReserveName("LazyLoopBacktrack"); MarkLabel(backtrackingLabel, emitSemicolon: false); // Uncapture any captures if the expression has any. It's possible the captures it has // are before this node, in which case this is wasted effort, but still functionally correct. if (capturePos is not null) { EmitUncaptureUntil(capturePos); } // If there's a max number of iterations, see if we've exceeded the maximum number of characters // to match. If we haven't, increment the iteration count. if (maxIterations is not null) { using (EmitBlock(writer, $"if ({iterationCount} >= {maxIterations})")) { Goto(doneLabel); } writer.WriteLine($"{iterationCount}++;"); } // Now match the next item in the lazy loop. We need to reset the pos to the position // just after the last character in this loop was matched, and we need to store the resulting position // for the next time we backtrack. 
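// Sketch of the code emitted for this step (identifiers are illustrative; the real names
// come from ReserveName, and the actual character test depends on the node):
//     pos = lazyloop_pos;
//     slice = inputSpan.Slice(pos);
//     if ((uint)slice.Length < 1 || slice[0] != 'a') goto NoMatch;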
writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); EmitSingleChar(node); TransferSliceStaticPosToPos(); // Now that we've appropriately advanced by one character and are set for what comes after the loop, // see if we can skip ahead more iterations by doing a search for a following literal. if (iterationCount is null && node.Kind is RegexNodeKind.Notonelazy && !IsCaseInsensitive(node) && subsequent?.FindStartingLiteral(4) is ValueTuple<char, string?, string?> literal && // 5 == max optimized by IndexOfAny, and we need to reserve 1 for node.Ch (literal.Item3 is not null ? !literal.Item3.Contains(node.Ch) : (literal.Item2?[0] ?? literal.Item1) != node.Ch)) // no overlap between node.Ch and the start of the literal { // e.g. "<[^>]*?>" // This lazy loop will consume all characters other than node.Ch until the subsequent literal. // We can implement it to search for either that char or the literal, whichever comes first. // If it ends up being that node.Ch, the loop fails (we're only here if we're backtracking). writer.WriteLine( literal.Item2 is not null ? $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(node.Ch)}, {Literal(literal.Item2[0])});" : literal.Item3 is null ? $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(node.Ch)}, {Literal(literal.Item1)});" : literal.Item3.Length switch { 2 => $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(node.Ch)}, {Literal(literal.Item3[0])}, {Literal(literal.Item3[1])});", _ => $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(node.Ch + literal.Item3)});", }); using (EmitBlock(writer, $"if ((uint){startingPos} >= (uint){sliceSpan}.Length || {sliceSpan}[{startingPos}] == {Literal(node.Ch)})")) { Goto(doneLabel); } writer.WriteLine($"pos += {startingPos};"); SliceInputSpan(writer); } else if (iterationCount is null && node.Kind is RegexNodeKind.Setlazy && node.Str == RegexCharClass.AnyClass && subsequent?.FindStartingLiteral() is ValueTuple<char, string?, string?> literal2) { // e.g. ".*?string" with RegexOptions.Singleline // This lazy loop will consume all characters until the subsequent literal. If the subsequent literal // isn't found, the loop fails. We can implement it to just search for that literal. writer.WriteLine( literal2.Item2 is not null ? $"{startingPos} = global::System.MemoryExtensions.IndexOf({sliceSpan}, {Literal(literal2.Item2)});" : literal2.Item3 is null ? $"{startingPos} = global::System.MemoryExtensions.IndexOf({sliceSpan}, {Literal(literal2.Item1)});" : literal2.Item3.Length switch { 2 => $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(literal2.Item3[0])}, {Literal(literal2.Item3[1])});", 3 => $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(literal2.Item3[0])}, {Literal(literal2.Item3[1])}, {Literal(literal2.Item3[2])});", _ => $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(literal2.Item3)});", }); using (EmitBlock(writer, $"if ({startingPos} < 0)")) { Goto(doneLabel); } writer.WriteLine($"pos += {startingPos};"); SliceInputSpan(writer); } // Store the position we've left off at in case we need to iterate again. writer.WriteLine($"{startingPos} = pos;"); // Update the done label for everything that comes after this node. This is done after we emit the single char // matching, as that failing indicates the loop itself has failed to match. 
string originalDoneLabel = doneLabel; doneLabel = backtrackingLabel; // leave set to the backtracking label for all subsequent nodes writer.WriteLine(); MarkLabel(endLoopLabel); if (capturePos is not null) { writer.WriteLine($"{capturePos} = base.Crawlpos();"); } if (node.IsInLoop()) { writer.WriteLine(); // Store the loop's state var toPushPop = new List<string>(3) { startingPos }; if (capturePos is not null) { toPushPop.Add(capturePos); } if (iterationCount is not null) { toPushPop.Add(iterationCount); } string[] toPushPopArray = toPushPop.ToArray(); EmitStackPush(toPushPopArray); // Skip past the backtracking section string end = ReserveName("SkipBacktrack"); Goto(end); writer.WriteLine(); // Emit a backtracking section that restores the loop's state and then jumps to the previous done label string backtrack = ReserveName("CharLazyBacktrack"); MarkLabel(backtrack, emitSemicolon: false); Array.Reverse(toPushPopArray); EmitStackPop(toPushPopArray); Goto(doneLabel); writer.WriteLine(); doneLabel = backtrack; MarkLabel(end); } } void EmitLazy(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Lazyloop, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M < int.MaxValue, $"Unexpected M={node.M}"); Debug.Assert(node.N >= node.M, $"Unexpected M={node.M}, N={node.N}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); int minIterations = node.M; int maxIterations = node.N; string originalDoneLabel = doneLabel; bool isAtomic = analysis.IsAtomicByAncestor(node); // If this is actually an atomic lazy loop, we need to output just the minimum number of iterations, // as nothing will backtrack into the lazy loop to get it progress further. if (isAtomic) { switch (minIterations) { case 0: // Atomic lazy with a min count of 0: nop. return; case 1: // Atomic lazy with a min count of 1: just output the child, no looping required. EmitNode(node.Child(0)); return; } writer.WriteLine(); } // If this is actually a repeater and the child doesn't have any backtracking in it that might // cause us to need to unwind already taken iterations, just output it as a repeater loop. if (minIterations == maxIterations && !analysis.MayBacktrack(node.Child(0))) { EmitNonBacktrackingRepeater(node); return; } // We might loop any number of times. In order to ensure this loop and subsequent code sees sliceStaticPos // the same regardless, we always need it to contain the same value, and the easiest such value is 0. // So, we transfer sliceStaticPos to pos, and ensure that any path out of here has sliceStaticPos as 0. TransferSliceStaticPosToPos(); string startingPos = ReserveName("lazyloop_starting_pos"); string iterationCount = ReserveName("lazyloop_iteration"); string sawEmpty = ReserveName("lazyLoopEmptySeen"); string body = ReserveName("LazyLoopBody"); string endLoop = ReserveName("LazyLoopEnd"); writer.WriteLine($"int {iterationCount} = 0, {startingPos} = pos, {sawEmpty} = 0;"); // If the min count is 0, start out by jumping right to what's after the loop. Backtracking // will then bring us back in to do further iterations. if (minIterations == 0) { Goto(endLoop); } writer.WriteLine(); // Iteration body MarkLabel(body, emitSemicolon: false); EmitTimeoutCheck(writer, hasTimeout); // We need to store the starting pos and crawl position so that it may // be backtracked through later. This needs to be the starting position from // the iteration we're leaving, so it's pushed before updating it to pos. EmitStackPush(expressionHasCaptures ? 
new[] { "base.Crawlpos()", startingPos, "pos", sawEmpty } : new[] { startingPos, "pos", sawEmpty }); writer.WriteLine(); // Save off some state. We need to store the current pos so we can compare it against // pos after the iteration, in order to determine whether the iteration was empty. Empty // iterations are allowed as part of min matches, but once we've met the min quote, empty matches // are considered match failures. writer.WriteLine($"{startingPos} = pos;"); // Proactively increase the number of iterations. We do this prior to the match rather than once // we know it's successful, because we need to decrement it as part of a failed match when // backtracking; it's thus simpler to just always decrement it as part of a failed match, even // when initially greedily matching the loop, which then requires we increment it before trying. writer.WriteLine($"{iterationCount}++;"); // Last but not least, we need to set the doneLabel that a failed match of the body will jump to. // Such an iteration match failure may or may not fail the whole operation, depending on whether // we've already matched the minimum required iterations, so we need to jump to a location that // will make that determination. string iterationFailedLabel = ReserveName("LazyLoopIterationNoMatch"); doneLabel = iterationFailedLabel; // Finally, emit the child. Debug.Assert(sliceStaticPos == 0); EmitNode(node.Child(0)); writer.WriteLine(); TransferSliceStaticPosToPos(); // ensure sliceStaticPos remains 0 if (doneLabel == iterationFailedLabel) { doneLabel = originalDoneLabel; } // Loop condition. Continue iterating if we've not yet reached the minimum. if (minIterations > 0) { using (EmitBlock(writer, $"if ({CountIsLessThan(iterationCount, minIterations)})")) { Goto(body); } } // If the last iteration was empty, we need to prevent further iteration from this point // unless we backtrack out of this iteration. We can do that easily just by pretending // we reached the max iteration count. using (EmitBlock(writer, $"if (pos == {startingPos})")) { writer.WriteLine($"{sawEmpty} = 1;"); } // We matched the next iteration. Jump to the subsequent code. Goto(endLoop); writer.WriteLine(); // Now handle what happens when an iteration fails. We need to reset state to what it was before just that iteration // started. That includes resetting pos and clearing out any captures from that iteration. 
MarkLabel(iterationFailedLabel, emitSemicolon: false); writer.WriteLine($"{iterationCount}--;"); using (EmitBlock(writer, $"if ({iterationCount} < 0)")) { Goto(originalDoneLabel); } EmitStackPop(sawEmpty, "pos", startingPos); if (expressionHasCaptures) { EmitUncaptureUntil(StackPop()); } SliceInputSpan(writer); if (doneLabel == originalDoneLabel) { Goto(originalDoneLabel); } else { using (EmitBlock(writer, $"if ({iterationCount} == 0)")) { Goto(originalDoneLabel); } Goto(doneLabel); } writer.WriteLine(); MarkLabel(endLoop); if (!isAtomic) { // Store the capture's state and skip the backtracking section EmitStackPush(startingPos, iterationCount, sawEmpty); string skipBacktrack = ReserveName("SkipBacktrack"); Goto(skipBacktrack); writer.WriteLine(); // Emit a backtracking section that restores the capture's state and then jumps to the previous done label string backtrack = ReserveName($"LazyLoopBacktrack"); MarkLabel(backtrack, emitSemicolon: false); EmitStackPop(sawEmpty, iterationCount, startingPos); if (maxIterations == int.MaxValue) { using (EmitBlock(writer, $"if ({sawEmpty} == 0)")) { Goto(body); } } else { using (EmitBlock(writer, $"if ({CountIsLessThan(iterationCount, maxIterations)} && {sawEmpty} == 0)")) { Goto(body); } } Goto(doneLabel); writer.WriteLine(); doneLabel = backtrack; MarkLabel(skipBacktrack); } } // Emits the code to handle a loop (repeater) with a fixed number of iterations. // RegexNode.M is used for the number of iterations (RegexNode.N is ignored), as this // might be used to implement the required iterations of other kinds of loops. void EmitSingleCharRepeater(RegexNode node, bool emitLengthCheck = true) { Debug.Assert(node.IsOneFamily || node.IsNotoneFamily || node.IsSetFamily, $"Unexpected type: {node.Kind}"); int iterations = node.M; switch (iterations) { case 0: // No iterations, nothing to do. return; case 1: // Just match the individual item EmitSingleChar(node, emitLengthCheck); return; case <= RegexNode.MultiVsRepeaterLimit when node.IsOneFamily && !IsCaseInsensitive(node): // This is a repeated case-sensitive character; emit it as a multi in order to get all the optimizations // afforded to a multi, e.g. unrolling the loop with multi-char reads/comparisons at a time. EmitMultiCharString(new string(node.Ch, iterations), caseInsensitive: false, emitLengthCheck); return; } if (iterations <= MaxUnrollSize) { // if ((uint)(sliceStaticPos + iterations - 1) >= (uint)slice.Length || // slice[sliceStaticPos] != c1 || // slice[sliceStaticPos + 1] != c2 || // ...) 
// { // goto doneLabel; // } writer.Write($"if ("); if (emitLengthCheck) { writer.WriteLine($"{SpanLengthCheck(iterations)} ||"); writer.Write(" "); } EmitSingleChar(node, emitLengthCheck: false, clauseOnly: true); for (int i = 1; i < iterations; i++) { writer.WriteLine(" ||"); writer.Write(" "); EmitSingleChar(node, emitLengthCheck: false, clauseOnly: true); } writer.WriteLine(")"); using (EmitBlock(writer, null)) { Goto(doneLabel); } } else { // if ((uint)(sliceStaticPos + iterations - 1) >= (uint)slice.Length) goto doneLabel; if (emitLengthCheck) { EmitSpanLengthCheck(iterations); } string repeaterSpan = "repeaterSlice"; // As this repeater doesn't wrap arbitrary node emits, this shouldn't conflict with anything writer.WriteLine($"global::System.ReadOnlySpan<char> {repeaterSpan} = {sliceSpan}.Slice({sliceStaticPos}, {iterations});"); using (EmitBlock(writer, $"for (int i = 0; i < {repeaterSpan}.Length; i++)")) { EmitTimeoutCheck(writer, hasTimeout); string tmpTextSpanLocal = sliceSpan; // we want EmitSingleChar to refer to this temporary int tmpSliceStaticPos = sliceStaticPos; sliceSpan = repeaterSpan; sliceStaticPos = 0; EmitSingleChar(node, emitLengthCheck: false, offset: "i"); sliceSpan = tmpTextSpanLocal; sliceStaticPos = tmpSliceStaticPos; } sliceStaticPos += iterations; } } // Emits the code to handle a non-backtracking, variable-length loop around a single character comparison. void EmitSingleCharAtomicLoop(RegexNode node, bool emitLengthChecksIfRequired = true) { Debug.Assert(node.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic, $"Unexpected type: {node.Kind}"); // If this is actually a repeater, emit that instead. if (node.M == node.N) { EmitSingleCharRepeater(node, emitLengthChecksIfRequired); return; } // If this is actually an optional single char, emit that instead. if (node.M == 0 && node.N == 1) { EmitAtomicSingleCharZeroOrOne(node); return; } Debug.Assert(node.N > node.M); int minIterations = node.M; int maxIterations = node.N; Span<char> setChars = stackalloc char[5]; // 5 is max optimized by IndexOfAny today int numSetChars = 0; string iterationLocal = ReserveName("iteration"); if (node.IsNotoneFamily && maxIterations == int.MaxValue && (!IsCaseInsensitive(node))) { // For Notone, we're looking for a specific character, as everything until we find // it is consumed by the loop. If we're unbounded, such as with ".*" and if we're case-sensitive, // we can use the vectorized IndexOf to do the search, rather than open-coding it. The unbounded // restriction is purely for simplicity; it could be removed in the future with additional code to // handle the unbounded case. writer.Write($"int {iterationLocal} = global::System.MemoryExtensions.IndexOf({sliceSpan}"); if (sliceStaticPos > 0) { writer.Write($".Slice({sliceStaticPos})"); } writer.WriteLine($", {Literal(node.Ch)});"); using (EmitBlock(writer, $"if ({iterationLocal} < 0)")) { writer.WriteLine(sliceStaticPos > 0 ? 
$"{iterationLocal} = {sliceSpan}.Length - {sliceStaticPos};" : $"{iterationLocal} = {sliceSpan}.Length;"); } writer.WriteLine(); } else if (node.IsSetFamily && maxIterations == int.MaxValue && !IsCaseInsensitive(node) && (numSetChars = RegexCharClass.GetSetChars(node.Str!, setChars)) != 0 && RegexCharClass.IsNegated(node.Str!)) { // If the set is negated and contains only a few characters (if it contained 1 and was negated, it should // have been reduced to a Notone), we can use an IndexOfAny to find any of the target characters. // As with the notoneloopatomic above, the unbounded constraint is purely for simplicity. Debug.Assert(numSetChars > 1); writer.Write($"int {iterationLocal} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}"); if (sliceStaticPos != 0) { writer.Write($".Slice({sliceStaticPos})"); } writer.WriteLine(numSetChars switch { 2 => $", {Literal(setChars[0])}, {Literal(setChars[1])});", 3 => $", {Literal(setChars[0])}, {Literal(setChars[1])}, {Literal(setChars[2])});", _ => $", {Literal(setChars.Slice(0, numSetChars).ToString())});", }); using (EmitBlock(writer, $"if ({iterationLocal} < 0)")) { writer.WriteLine(sliceStaticPos > 0 ? $"{iterationLocal} = {sliceSpan}.Length - {sliceStaticPos};" : $"{iterationLocal} = {sliceSpan}.Length;"); } writer.WriteLine(); } else if (node.IsSetFamily && maxIterations == int.MaxValue && node.Str == RegexCharClass.AnyClass) { // .* was used with RegexOptions.Singleline, which means it'll consume everything. Just jump to the end. // The unbounded constraint is the same as in the Notone case above, done purely for simplicity. TransferSliceStaticPosToPos(); writer.WriteLine($"int {iterationLocal} = inputSpan.Length - pos;"); } else { // For everything else, do a normal loop. string expr = $"{sliceSpan}[{iterationLocal}]"; if (node.IsSetFamily) { expr = MatchCharacterClass(hasTextInfo, options, expr, node.Str!, IsCaseInsensitive(node), negate: false, additionalDeclarations, ref requiredHelpers); } else { expr = ToLowerIfNeeded(hasTextInfo, options, expr, IsCaseInsensitive(node)); expr = $"{expr} {(node.IsOneFamily ? "==" : "!=")} {Literal(node.Ch)}"; } if (minIterations != 0 || maxIterations != int.MaxValue) { // For any loops other than * loops, transfer text pos to pos in // order to zero it out to be able to use the single iteration variable // for both iteration count and indexer. TransferSliceStaticPosToPos(); } writer.WriteLine($"int {iterationLocal} = {sliceStaticPos};"); sliceStaticPos = 0; string maxClause = maxIterations != int.MaxValue ? $"{CountIsLessThan(iterationLocal, maxIterations)} && " : ""; using (EmitBlock(writer, $"while ({maxClause}(uint){iterationLocal} < (uint){sliceSpan}.Length && {expr})")) { EmitTimeoutCheck(writer, hasTimeout); writer.WriteLine($"{iterationLocal}++;"); } writer.WriteLine(); } // Check to ensure we've found at least min iterations. if (minIterations > 0) { using (EmitBlock(writer, $"if ({CountIsLessThan(iterationLocal, minIterations)})")) { Goto(doneLabel); } writer.WriteLine(); } // Now that we've completed our optional iterations, advance the text span // and pos by the number of iterations completed. writer.WriteLine($"{sliceSpan} = {sliceSpan}.Slice({iterationLocal});"); writer.WriteLine($"pos += {iterationLocal};"); } // Emits the code to handle a non-backtracking optional zero-or-one loop. 
void EmitAtomicSingleCharZeroOrOne(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M == 0 && node.N == 1); string expr = $"{sliceSpan}[{sliceStaticPos}]"; if (node.IsSetFamily) { expr = MatchCharacterClass(hasTextInfo, options, expr, node.Str!, IsCaseInsensitive(node), negate: false, additionalDeclarations, ref requiredHelpers); } else { expr = ToLowerIfNeeded(hasTextInfo, options, expr, IsCaseInsensitive(node)); expr = $"{expr} {(node.IsOneFamily ? "==" : "!=")} {Literal(node.Ch)}"; } string spaceAvailable = sliceStaticPos != 0 ? $"(uint){sliceSpan}.Length > (uint){sliceStaticPos}" : $"!{sliceSpan}.IsEmpty"; using (EmitBlock(writer, $"if ({spaceAvailable} && {expr})")) { writer.WriteLine($"{sliceSpan} = {sliceSpan}.Slice(1);"); writer.WriteLine($"pos++;"); } } void EmitNonBacktrackingRepeater(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M < int.MaxValue, $"Unexpected M={node.M}"); Debug.Assert(node.M == node.N, $"Unexpected M={node.M} == N={node.N}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); Debug.Assert(!analysis.MayBacktrack(node.Child(0)), $"Expected non-backtracking node {node.Kind}"); // Ensure every iteration of the loop sees a consistent value. TransferSliceStaticPosToPos(); // Loop M==N times to match the child exactly that number of times. string i = ReserveName("loop_iteration"); using (EmitBlock(writer, $"for (int {i} = 0; {i} < {node.M}; {i}++)")) { EmitNode(node.Child(0)); TransferSliceStaticPosToPos(); // make sure the static position remains at 0 for subsequent constructs } } void EmitLoop(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M < int.MaxValue, $"Unexpected M={node.M}"); Debug.Assert(node.N >= node.M, $"Unexpected M={node.M}, N={node.N}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); int minIterations = node.M; int maxIterations = node.N; bool isAtomic = analysis.IsAtomicByAncestor(node); // If this is actually a repeater and the child doesn't have any backtracking in it that might // cause us to need to unwind already taken iterations, just output it as a repeater loop. if (minIterations == maxIterations && !analysis.MayBacktrack(node.Child(0))) { EmitNonBacktrackingRepeater(node); return; } // We might loop any number of times. In order to ensure this loop and subsequent code sees sliceStaticPos // the same regardless, we always need it to contain the same value, and the easiest such value is 0. // So, we transfer sliceStaticPos to pos, and ensure that any path out of here has sliceStaticPos as 0.
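// In emitted terms that transfer is roughly (a sketch):
//     pos += sliceStaticPos;          // fold the statically tracked offset into pos
//     slice = inputSpan.Slice(pos);   // re-slice so subsequent indexing starts at 0
// after which sliceStaticPos is treated as 0 for everything that follows.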
TransferSliceStaticPosToPos(); string originalDoneLabel = doneLabel; string startingPos = ReserveName("loop_starting_pos"); string iterationCount = ReserveName("loop_iteration"); string body = ReserveName("LoopBody"); string endLoop = ReserveName("LoopEnd"); additionalDeclarations.Add($"int {iterationCount} = 0, {startingPos} = 0;"); writer.WriteLine($"{iterationCount} = 0;"); writer.WriteLine($"{startingPos} = pos;"); writer.WriteLine(); // Iteration body MarkLabel(body, emitSemicolon: false); EmitTimeoutCheck(writer, hasTimeout); // We need to store the starting pos and crawl position so that it may // be backtracked through later. This needs to be the starting position from // the iteration we're leaving, so it's pushed before updating it to pos. EmitStackPush(expressionHasCaptures ? new[] { "base.Crawlpos()", startingPos, "pos" } : new[] { startingPos, "pos" }); writer.WriteLine(); // Save off some state. We need to store the current pos so we can compare it against // pos after the iteration, in order to determine whether the iteration was empty. Empty // iterations are allowed as part of min matches, but once we've met the min quote, empty matches // are considered match failures. writer.WriteLine($"{startingPos} = pos;"); // Proactively increase the number of iterations. We do this prior to the match rather than once // we know it's successful, because we need to decrement it as part of a failed match when // backtracking; it's thus simpler to just always decrement it as part of a failed match, even // when initially greedily matching the loop, which then requires we increment it before trying. writer.WriteLine($"{iterationCount}++;"); writer.WriteLine(); // Last but not least, we need to set the doneLabel that a failed match of the body will jump to. // Such an iteration match failure may or may not fail the whole operation, depending on whether // we've already matched the minimum required iterations, so we need to jump to a location that // will make that determination. string iterationFailedLabel = ReserveName("LoopIterationNoMatch"); doneLabel = iterationFailedLabel; // Finally, emit the child. Debug.Assert(sliceStaticPos == 0); EmitNode(node.Child(0)); writer.WriteLine(); TransferSliceStaticPosToPos(); // ensure sliceStaticPos remains 0 bool childBacktracks = doneLabel != iterationFailedLabel; // Loop condition. Continue iterating greedily if we've not yet reached the maximum. We also need to stop // iterating if the iteration matched empty and we already hit the minimum number of iterations. using (EmitBlock(writer, (minIterations > 0, maxIterations == int.MaxValue) switch { (true, true) => $"if (pos != {startingPos} || {CountIsLessThan(iterationCount, minIterations)})", (true, false) => $"if ((pos != {startingPos} || {CountIsLessThan(iterationCount, minIterations)}) && {CountIsLessThan(iterationCount, maxIterations)})", (false, true) => $"if (pos != {startingPos})", (false, false) => $"if (pos != {startingPos} && {CountIsLessThan(iterationCount, maxIterations)})", })) { Goto(body); } // We've matched as many iterations as we can with this configuration. Jump to what comes after the loop. Goto(endLoop); writer.WriteLine(); // Now handle what happens when an iteration fails, which could be an initial failure or it // could be while backtracking. We need to reset state to what it was before just that iteration // started. That includes resetting pos and clearing out any captures from that iteration. 
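// Rough skeleton of the greedy loop being emitted (labels and locals are illustrative):
//     LoopBody:
//         push state; loop_starting_pos = pos; loop_iteration++;
//         ... child ...
//         if (/* can and should iterate again */) goto LoopBody;
//         goto LoopEnd;
//     LoopIterationNoMatch:  // handled below
//         loop_iteration--; if (loop_iteration < 0) goto OriginalDone; pop state; ...
//     LoopEnd: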
MarkLabel(iterationFailedLabel, emitSemicolon: false); writer.WriteLine($"{iterationCount}--;"); using (EmitBlock(writer, $"if ({iterationCount} < 0)")) { Goto(originalDoneLabel); } EmitStackPop("pos", startingPos); if (expressionHasCaptures) { EmitUncaptureUntil(StackPop()); } SliceInputSpan(writer); if (minIterations > 0) { using (EmitBlock(writer, $"if ({iterationCount} == 0)")) { Goto(originalDoneLabel); } using (EmitBlock(writer, $"if ({CountIsLessThan(iterationCount, minIterations)})")) { Goto(childBacktracks ? doneLabel : originalDoneLabel); } } if (isAtomic) { doneLabel = originalDoneLabel; MarkLabel(endLoop); } else { if (childBacktracks) { Goto(endLoop); writer.WriteLine(); string backtrack = ReserveName("LoopBacktrack"); MarkLabel(backtrack, emitSemicolon: false); using (EmitBlock(writer, $"if ({iterationCount} == 0)")) { Goto(originalDoneLabel); } Goto(doneLabel); doneLabel = backtrack; } MarkLabel(endLoop); if (node.IsInLoop()) { writer.WriteLine(); // Store the loop's state EmitStackPush(startingPos, iterationCount); // Skip past the backtracking section string end = ReserveName("SkipBacktrack"); Goto(end); writer.WriteLine(); // Emit a backtracking section that restores the loop's state and then jumps to the previous done label string backtrack = ReserveName("LoopBacktrack"); MarkLabel(backtrack, emitSemicolon: false); EmitStackPop(iterationCount, startingPos); Goto(doneLabel); writer.WriteLine(); doneLabel = backtrack; MarkLabel(end); } } } // Gets a comparison for whether the value is less than the upper bound. static string CountIsLessThan(string value, int exclusiveUpper) => exclusiveUpper == 1 ? $"{value} == 0" : $"{value} < {exclusiveUpper}"; // Emits code to unwind the capture stack until the crawl position specified in the provided local. void EmitUncaptureUntil(string capturepos) { string name = "UncaptureUntil"; if (!additionalLocalFunctions.ContainsKey(name)) { var lines = new string[9]; lines[0] = "// <summary>Undo captures until we reach the specified capture position.</summary>"; lines[1] = "[global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)]"; lines[2] = $"void {name}(int capturepos)"; lines[3] = "{"; lines[4] = " while (base.Crawlpos() > capturepos)"; lines[5] = " {"; lines[6] = " base.Uncapture();"; lines[7] = " }"; lines[8] = "}"; additionalLocalFunctions.Add(name, lines); } writer.WriteLine($"{name}({capturepos});"); } /// <summary>Pushes values on to the backtracking stack.</summary> void EmitStackPush(params string[] args) { Debug.Assert(args.Length is >= 1); string function = $"StackPush{args.Length}"; additionalDeclarations.Add("int stackpos = 0;"); if (!additionalLocalFunctions.ContainsKey(function)) { var lines = new string[24 + args.Length]; lines[0] = $"// <summary>Push {args.Length} value{(args.Length == 1 ? "" : "s")} onto the backtracking stack.</summary>"; lines[1] = $"[global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)]"; lines[2] = $"static void {function}(ref int[] stack, ref int pos{FormatN(", int arg{0}", args.Length)})"; lines[3] = $"{{"; lines[4] = $" // If there's space available for {(args.Length > 1 ? $"all {args.Length} values, store them" : "the value, store it")}."; lines[5] = $" int[] s = stack;"; lines[6] = $" int p = pos;"; lines[7] = $" if ((uint){(args.Length > 1 ? 
$"(p + {args.Length - 1})" : "p")} < (uint)s.Length)"; lines[8] = $" {{"; for (int i = 0; i < args.Length; i++) { lines[9 + i] = $" s[p{(i == 0 ? "" : $" + {i}")}] = arg{i};"; } lines[9 + args.Length] = args.Length > 1 ? $" pos += {args.Length};" : " pos++;"; lines[10 + args.Length] = $" return;"; lines[11 + args.Length] = $" }}"; lines[12 + args.Length] = $""; lines[13 + args.Length] = $" // Otherwise, resize the stack to make room and try again."; lines[14 + args.Length] = $" WithResize(ref stack, ref pos{FormatN(", arg{0}", args.Length)});"; lines[15 + args.Length] = $""; lines[16 + args.Length] = $" // <summary>Resize the backtracking stack array and push {args.Length} value{(args.Length == 1 ? "" : "s")} onto the stack.</summary>"; lines[17 + args.Length] = $" [global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.NoInlining)]"; lines[18 + args.Length] = $" static void WithResize(ref int[] stack, ref int pos{FormatN(", int arg{0}", args.Length)})"; lines[19 + args.Length] = $" {{"; lines[20 + args.Length] = $" global::System.Array.Resize(ref stack, (pos + {args.Length - 1}) * 2);"; lines[21 + args.Length] = $" {function}(ref stack, ref pos{FormatN(", arg{0}", args.Length)});"; lines[22 + args.Length] = $" }}"; lines[23 + args.Length] = $"}}"; additionalLocalFunctions.Add(function, lines); } writer.WriteLine($"{function}(ref base.runstack!, ref stackpos, {string.Join(", ", args)});"); } /// <summary>Pops values from the backtracking stack into the specified locations.</summary> void EmitStackPop(params string[] args) { Debug.Assert(args.Length is >= 1); if (args.Length == 1) { writer.WriteLine($"{args[0]} = {StackPop()};"); return; } string function = $"StackPop{args.Length}"; if (!additionalLocalFunctions.ContainsKey(function)) { var lines = new string[5 + args.Length]; lines[0] = $"// <summary>Pop {args.Length} value{(args.Length == 1 ? "" : "s")} from the backtracking stack.</summary>"; lines[1] = $"[global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)]"; lines[2] = $"static void {function}(int[] stack, ref int pos{FormatN(", out int arg{0}", args.Length)})"; lines[3] = $"{{"; for (int i = 0; i < args.Length; i++) { lines[4 + i] = $" arg{i} = stack[--pos];"; } lines[4 + args.Length] = $"}}"; additionalLocalFunctions.Add(function, lines); } writer.WriteLine($"{function}(base.runstack, ref stackpos, out {string.Join(", out ", args)});"); } /// <summary>Expression for popping the next item from the backtracking stack.</summary> string StackPop() => "base.runstack![--stackpos]"; /// <summary>Concatenates the strings resulting from formatting the format string with the values [0, count).</summary> static string FormatN(string format, int count) => string.Concat(from i in Enumerable.Range(0, count) select string.Format(format, i)); } private static bool EmitLoopTimeoutCounterIfNeeded(IndentedTextWriter writer, RegexMethod rm) { if (rm.MatchTimeout != Timeout.Infinite) { writer.WriteLine("int loopTimeoutCounter = 0;"); return true; } return false; } /// <summary>Emits a timeout check.</summary> private static void EmitTimeoutCheck(IndentedTextWriter writer, bool hasTimeout) { const int LoopTimeoutCheckCount = 2048; // A conservative value to guarantee the correct timeout handling. if (hasTimeout) { // Increment counter for each loop iteration. // Emit code to check the timeout every 2048th iteration. 
using (EmitBlock(writer, $"if (++loopTimeoutCounter == {LoopTimeoutCheckCount})")) { writer.WriteLine("loopTimeoutCounter = 0;"); writer.WriteLine("base.CheckTimeout();"); } writer.WriteLine(); } } private static bool EmitInitializeCultureForTryMatchAtCurrentPositionIfNecessary(IndentedTextWriter writer, RegexMethod rm, AnalysisResults analysis) { if (analysis.HasIgnoreCase && ((RegexOptions)rm.Options & RegexOptions.CultureInvariant) == 0) { writer.WriteLine("global::System.Globalization.TextInfo textInfo = global::System.Globalization.CultureInfo.CurrentCulture.TextInfo;"); return true; } return false; } private static bool UseToLowerInvariant(bool hasTextInfo, RegexOptions options) => !hasTextInfo || (options & RegexOptions.CultureInvariant) != 0; private static string ToLower(bool hasTextInfo, RegexOptions options, string expression) => UseToLowerInvariant(hasTextInfo, options) ? $"char.ToLowerInvariant({expression})" : $"textInfo.ToLower({expression})"; private static string ToLowerIfNeeded(bool hasTextInfo, RegexOptions options, string expression, bool toLower) => toLower ? ToLower(hasTextInfo, options, expression) : expression; private static string MatchCharacterClass(bool hasTextInfo, RegexOptions options, string chExpr, string charClass, bool caseInsensitive, bool negate, HashSet<string> additionalDeclarations, ref RequiredHelperFunctions requiredHelpers) { // We need to perform the equivalent of calling RegexRunner.CharInClass(ch, charClass), // but that call is relatively expensive. Before we fall back to it, we try to optimize // some common cases for which we can do much better, such as known character classes // for which we can call a dedicated method, or a fast-path for ASCII using a lookup table. // First, see if the char class is a built-in one for which there's a better function // we can just call directly. Everything in this section must work correctly for both // case-sensitive and case-insensitive modes, regardless of culture. switch (charClass) { case RegexCharClass.AnyClass: // ideally this could just be "return true;", but we need to evaluate the expression for its side effects return $"({chExpr} {(negate ? "<" : ">=")} 0)"; // a char is unsigned and thus won't ever be negative case RegexCharClass.DigitClass: case RegexCharClass.NotDigitClass: negate ^= charClass == RegexCharClass.NotDigitClass; return $"{(negate ? "!" : "")}char.IsDigit({chExpr})"; case RegexCharClass.SpaceClass: case RegexCharClass.NotSpaceClass: negate ^= charClass == RegexCharClass.NotSpaceClass; return $"{(negate ? "!" : "")}char.IsWhiteSpace({chExpr})"; case RegexCharClass.WordClass: case RegexCharClass.NotWordClass: requiredHelpers |= RequiredHelperFunctions.IsWordChar; negate ^= charClass == RegexCharClass.NotWordClass; return $"{(negate ? "!" : "")}IsWordChar({chExpr})"; } // If we're meant to be doing a case-insensitive lookup, and if we're not using the invariant culture, // lowercase the input. If we're using the invariant culture, we may still end up calling ToLower later // on, but we may also be able to avoid it, in particular in the case of our lookup table, where we can // generate the lookup table already factoring in the invariant case sensitivity. There are multiple // special-code paths between here and the lookup table, but we only take those if invariant is false; // if it were true, they'd need to use CallToLower(). 
bool invariant = false; if (caseInsensitive) { invariant = UseToLowerInvariant(hasTextInfo, options); if (!invariant) { chExpr = ToLower(hasTextInfo, options, chExpr); } } // Next, handle simple sets of one range, e.g. [A-Z], [0-9], etc. This includes some built-in classes, like ECMADigitClass. if (!invariant && RegexCharClass.TryGetSingleRange(charClass, out char lowInclusive, out char highInclusive)) { negate ^= RegexCharClass.IsNegated(charClass); return lowInclusive == highInclusive ? $"({chExpr} {(negate ? "!=" : "==")} {Literal(lowInclusive)})" : $"(((uint){chExpr}) - {Literal(lowInclusive)} {(negate ? ">" : "<=")} (uint)({Literal(highInclusive)} - {Literal(lowInclusive)}))"; } // Next if the character class contains nothing but a single Unicode category, we can calle char.GetUnicodeCategory and // compare against it. It has a fast-lookup path for ASCII, so is as good or better than any lookup we'd generate (plus // we get smaller code), and it's what we'd do for the fallback (which we get to avoid generating) as part of CharInClass. if (!invariant && RegexCharClass.TryGetSingleUnicodeCategory(charClass, out UnicodeCategory category, out bool negated)) { negate ^= negated; return $"(char.GetUnicodeCategory({chExpr}) {(negate ? "!=" : "==")} global::System.Globalization.UnicodeCategory.{category})"; } // Next, if there's only 2 or 3 chars in the set (fairly common due to the sets we create for prefixes), // it may be cheaper and smaller to compare against each than it is to use a lookup table. We can also special-case // the very common case with case insensitivity of two characters next to each other being the upper and lowercase // ASCII variants of each other, in which case we can use bit manipulation to avoid a comparison. if (!invariant && !RegexCharClass.IsNegated(charClass)) { Span<char> setChars = stackalloc char[3]; int mask; switch (RegexCharClass.GetSetChars(charClass, setChars)) { case 2: if (RegexCharClass.DifferByOneBit(setChars[0], setChars[1], out mask)) { return $"(({chExpr} | 0x{mask:X}) {(negate ? "!=" : "==")} {Literal((char)(setChars[1] | mask))})"; } additionalDeclarations.Add("char ch;"); return negate ? $"(((ch = {chExpr}) != {Literal(setChars[0])}) & (ch != {Literal(setChars[1])}))" : $"(((ch = {chExpr}) == {Literal(setChars[0])}) | (ch == {Literal(setChars[1])}))"; case 3: additionalDeclarations.Add("char ch;"); return (negate, RegexCharClass.DifferByOneBit(setChars[0], setChars[1], out mask)) switch { (false, false) => $"(((ch = {chExpr}) == {Literal(setChars[0])}) | (ch == {Literal(setChars[1])}) | (ch == {Literal(setChars[2])}))", (true, false) => $"(((ch = {chExpr}) != {Literal(setChars[0])}) & (ch != {Literal(setChars[1])}) & (ch != {Literal(setChars[2])}))", (false, true) => $"((((ch = {chExpr}) | 0x{mask:X}) == {Literal((char)(setChars[1] | mask))}) | (ch == {Literal(setChars[2])}))", (true, true) => $"((((ch = {chExpr}) | 0x{mask:X}) != {Literal((char)(setChars[1] | mask))}) & (ch != {Literal(setChars[2])}))", }; } } // All options after this point require a ch local. additionalDeclarations.Add("char ch;"); // Analyze the character set more to determine what code to generate. 
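// (Sketch of where this analysis leads: an ASCII-only class compiles to a 128-bit lookup packed
// into an 8-char string constant, so a test against a set like [0-9a-fA-F] becomes roughly
//     ch < 128 && ("<bitvector>"[ch >> 4] & (1 << (ch & 0xF))) != 0
// with a RegexRunner.CharInClass fallback emitted only when non-ASCII input could still match.)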
RegexCharClass.CharClassAnalysisResults analysis = RegexCharClass.Analyze(charClass); if (!invariant) // if we're being asked to do a case insensitive, invariant comparison, use the lookup table { if (analysis.ContainsNoAscii) { // We determined that the character class contains only non-ASCII, // for example if the class were [\p{IsGreek}\p{IsGreekExtended}], which is // the same as [\u0370-\u03FF\u1F00-1FFF]. (In the future, we could possibly // extend the analysis to produce a known lower-bound and compare against // that rather than always using 128 as the pivot point.) return negate ? $"((ch = {chExpr}) < 128 || !global::System.Text.RegularExpressions.RegexRunner.CharInClass((char)ch, {Literal(charClass)}))" : $"((ch = {chExpr}) >= 128 && global::System.Text.RegularExpressions.RegexRunner.CharInClass((char)ch, {Literal(charClass)}))"; } if (analysis.AllAsciiContained) { // We determined that every ASCII character is in the class, for example // if the class were the negated example from case 1 above: // [^\p{IsGreek}\p{IsGreekExtended}]. return negate ? $"((ch = {chExpr}) >= 128 && !global::System.Text.RegularExpressions.RegexRunner.CharInClass((char)ch, {Literal(charClass)}))" : $"((ch = {chExpr}) < 128 || global::System.Text.RegularExpressions.RegexRunner.CharInClass((char)ch, {Literal(charClass)}))"; } } // Now, our big hammer is to generate a lookup table that lets us quickly index by character into a yes/no // answer as to whether the character is in the target character class. However, we don't want to store // a lookup table for every possible character for every character class in the regular expression; at one // bit for each of 65K characters, that would be an 8K bitmap per character class. Instead, we handle the // common case of ASCII input via such a lookup table, which at one bit for each of 128 characters is only // 16 bytes per character class. We of course still need to be able to handle inputs that aren't ASCII, so // we check the input against 128, and have a fallback if the input is >= to it. Determining the right // fallback could itself be expensive. For example, if it's possible that a value >= 128 could match the // character class, we output a call to RegexRunner.CharInClass, but we don't want to have to enumerate the // entire character class evaluating every character against it, just to determine whether it's a match. // Instead, we employ some quick heuristics that will always ensure we provide a correct answer even if // we could have sometimes generated better code to give that answer. // Generate the lookup table to store 128 answers as bits. We use a const string instead of a byte[] / static // data property because it lets IL emit handle all the details for us. string bitVectorString = StringExtensions.Create(8, (charClass, invariant), static (dest, state) => // String length is 8 chars == 16 bytes == 128 bits. { for (int i = 0; i < 128; i++) { char c = (char)i; bool isSet = state.invariant ? RegexCharClass.CharInClass(char.ToLowerInvariant(c), state.charClass) : RegexCharClass.CharInClass(c, state.charClass); if (isSet) { dest[i >> 4] |= (char)(1 << (i & 0xF)); } } }); // We determined that the character class may contain ASCII, so we // output the lookup against the lookup table. if (analysis.ContainsOnlyAscii) { // We know that all inputs that could match are ASCII, for example if the // character class were [A-Za-z0-9], so since the ch is now known to be >= 128, we // can just fail the comparison. return negate ? 
$"((ch = {chExpr}) >= 128 || ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) == 0)" : $"((ch = {chExpr}) < 128 && ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) != 0)"; } if (analysis.AllNonAsciiContained) { // We know that all non-ASCII inputs match, for example if the character // class were [^\r\n], so since we just determined the ch to be >= 128, we can just // give back success. return negate ? $"((ch = {chExpr}) < 128 && ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) == 0)" : $"((ch = {chExpr}) >= 128 || ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) != 0)"; } // We know that the whole class wasn't ASCII, and we don't know anything about the non-ASCII // characters other than that some might be included, for example if the character class // were [\w\d], so since ch >= 128, we need to fall back to calling CharInClass. return (negate, invariant) switch { (false, false) => $"((ch = {chExpr}) < 128 ? ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) != 0 : global::System.Text.RegularExpressions.RegexRunner.CharInClass((char)ch, {Literal(charClass)}))", (true, false) => $"((ch = {chExpr}) < 128 ? ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) == 0 : !global::System.Text.RegularExpressions.RegexRunner.CharInClass((char)ch, {Literal(charClass)}))", (false, true) => $"((ch = {chExpr}) < 128 ? ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) != 0 : global::System.Text.RegularExpressions.RegexRunner.CharInClass(char.ToLowerInvariant((char)ch), {Literal(charClass)}))", (true, true) => $"((ch = {chExpr}) < 128 ? ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) == 0 : !global::System.Text.RegularExpressions.RegexRunner.CharInClass(char.ToLowerInvariant((char)ch), {Literal(charClass)}))", }; } /// <summary> /// Replaces <see cref="AdditionalDeclarationsPlaceholder"/> in <paramref name="writer"/> with /// all of the variable declarations in <paramref name="declarations"/>. /// </summary> /// <param name="writer">The writer around a StringWriter to have additional declarations inserted into.</param> /// <param name="declarations">The additional declarations to insert.</param> /// <param name="position">The position into the writer at which to insert the additional declarations.</param> /// <param name="indent">The indentation to use for the additional declarations.</param> private static void ReplaceAdditionalDeclarations(IndentedTextWriter writer, HashSet<string> declarations, int position, int indent) { if (declarations.Count != 0) { var tmp = new StringBuilder(); foreach (string decl in declarations.OrderBy(s => s)) { for (int i = 0; i < indent; i++) { tmp.Append(IndentedTextWriter.DefaultTabString); } tmp.AppendLine(decl); } ((StringWriter)writer.InnerWriter).GetStringBuilder().Insert(position, tmp.ToString()); } } /// <summary>Formats the character as valid C#.</summary> private static string Literal(char c) => SymbolDisplay.FormatLiteral(c, quote: true); /// <summary>Formats the string as valid C#.</summary> private static string Literal(string s) => SymbolDisplay.FormatLiteral(s, quote: true); private static string Literal(RegexOptions options) { string s = options.ToString(); if (int.TryParse(s, out _)) { // The options were formatted as an int, which means the runtime couldn't // produce a textual representation. So just output casting the value as an int. 
return $"(global::System.Text.RegularExpressions.RegexOptions)({(int)options})"; } // Parse the runtime-generated "Option1, Option2" into each piece and then concat // them back together. string[] parts = s.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries); for (int i = 0; i < parts.Length; i++) { parts[i] = "global::System.Text.RegularExpressions.RegexOptions." + parts[i].Trim(); } return string.Join(" | ", parts); } /// <summary>Gets a textual description of the node fit for rendering in a comment in source.</summary> private static string DescribeNode(RegexNode node, AnalysisResults analysis) => node.Kind switch { RegexNodeKind.Alternate => $"Match with {node.ChildCount()} alternative expressions{(analysis.IsAtomicByAncestor(node) ? ", atomically" : "")}.", RegexNodeKind.Atomic => $"Atomic group.", RegexNodeKind.Beginning => "Match if at the beginning of the string.", RegexNodeKind.Bol => "Match if at the beginning of a line.", RegexNodeKind.Boundary => $"Match if at a word boundary.", RegexNodeKind.Capture when node.M == -1 && node.N != -1 => $"Non-capturing balancing group. Uncaptures the {DescribeCapture(node.N, analysis)}.", RegexNodeKind.Capture when node.N != -1 => $"Balancing group. Captures the {DescribeCapture(node.M, analysis)} and uncaptures the {DescribeCapture(node.N, analysis)}.", RegexNodeKind.Capture when node.N == -1 => $"{DescribeCapture(node.M, analysis)}.", RegexNodeKind.Concatenate => "Match a sequence of expressions.", RegexNodeKind.ECMABoundary => $"Match if at a word boundary (according to ECMAScript rules).", RegexNodeKind.Empty => $"Match an empty string.", RegexNodeKind.End => "Match if at the end of the string.", RegexNodeKind.EndZ => "Match if at the end of the string or if before an ending newline.", RegexNodeKind.Eol => "Match if at the end of a line.", RegexNodeKind.Loop or RegexNodeKind.Lazyloop => node.M == 0 && node.N == 1 ? $"Optional ({(node.Kind is RegexNodeKind.Loop ? "greedy" : "lazy")})." 
: $"Loop {DescribeLoop(node, analysis)}.", RegexNodeKind.Multi => $"Match the string {Literal(node.Str!)}.", RegexNodeKind.NonBoundary => $"Match if at anything other than a word boundary.", RegexNodeKind.NonECMABoundary => $"Match if at anything other than a word boundary (according to ECMAScript rules).", RegexNodeKind.Nothing => $"Fail to match.", RegexNodeKind.Notone => $"Match any character other than {Literal(node.Ch)}.", RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Notonelazy => $"Match a character other than {Literal(node.Ch)} {DescribeLoop(node, analysis)}.", RegexNodeKind.One => $"Match {Literal(node.Ch)}.", RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy => $"Match {Literal(node.Ch)} {DescribeLoop(node, analysis)}.", RegexNodeKind.NegativeLookaround => $"Zero-width negative lookahead assertion.", RegexNodeKind.Backreference => $"Match the same text as matched by the {DescribeCapture(node.M, analysis)}.", RegexNodeKind.PositiveLookaround => $"Zero-width positive lookahead assertion.", RegexNodeKind.Set => $"Match {DescribeSet(node.Str!)}.", RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy => $"Match {DescribeSet(node.Str!)} {DescribeLoop(node, analysis)}.", RegexNodeKind.Start => "Match if at the start position.", RegexNodeKind.ExpressionConditional => $"Conditionally match one of two expressions depending on whether an initial expression matches.", RegexNodeKind.BackreferenceConditional => $"Conditionally match one of two expressions depending on whether the {DescribeCapture(node.M, analysis)} matched.", RegexNodeKind.UpdateBumpalong => $"Advance the next matching position.", _ => $"Unknown node type {node.Kind}", }; /// <summary>Gets an identifer to describe a capture group.</summary> private static string DescribeCapture(int capNum, AnalysisResults analysis) { // If we can get a capture name from the captures collection and it's not just a numerical representation of the group, use it. string name = RegexParser.GroupNameFromNumber(analysis.RegexTree.CaptureNumberSparseMapping, analysis.RegexTree.CaptureNames, analysis.RegexTree.CaptureCount, capNum); if (!string.IsNullOrEmpty(name) && (!int.TryParse(name, out int id) || id != capNum)) { name = Literal(name); } else { // Otherwise, create a numerical description of the capture group. int tens = capNum % 10; name = tens is >= 1 and <= 3 && capNum % 100 is < 10 or > 20 ? 
// Ends in 1, 2, 3 but not 11, 12, or 13 tens switch { 1 => $"{capNum}st", 2 => $"{capNum}nd", _ => $"{capNum}rd", } : $"{capNum}th"; } return $"{name} capture group"; } /// <summary>Gets a textual description of what characters match a set.</summary> private static string DescribeSet(string charClass) => charClass switch { RegexCharClass.AnyClass => "any character", RegexCharClass.DigitClass => "a Unicode digit", RegexCharClass.ECMADigitClass => "'0' through '9'", RegexCharClass.ECMASpaceClass => "a whitespace character (ECMA)", RegexCharClass.ECMAWordClass => "a word character (ECMA)", RegexCharClass.NotDigitClass => "any character other than a Unicode digit", RegexCharClass.NotECMADigitClass => "any character other than '0' through '9'", RegexCharClass.NotECMASpaceClass => "any character other than a space character (ECMA)", RegexCharClass.NotECMAWordClass => "any character other than a word character (ECMA)", RegexCharClass.NotSpaceClass => "any character other than a space character", RegexCharClass.NotWordClass => "any character other than a word character", RegexCharClass.SpaceClass => "a whitespace character", RegexCharClass.WordClass => "a word character", _ => $"a character in the set {RegexCharClass.DescribeSet(charClass)}", }; /// <summary>Writes a textual description of the node tree fit for rending in source.</summary> /// <param name="writer">The writer to which the description should be written.</param> /// <param name="node">The node being written.</param> /// <param name="prefix">The prefix to write at the beginning of every line, including a "//" for a comment.</param> /// <param name="analyses">Analysis of the tree</param> /// <param name="depth">The depth of the current node.</param> private static void DescribeExpression(TextWriter writer, RegexNode node, string prefix, AnalysisResults analysis, int depth = 0) { bool skip = node.Kind switch { // For concatenations, flatten the contents into the parent, but only if the parent isn't a form of alternation, // where each branch is considered to be independent rather than a concatenation. RegexNodeKind.Concatenate when node.Parent is not { Kind: RegexNodeKind.Alternate or RegexNodeKind.BackreferenceConditional or RegexNodeKind.ExpressionConditional } => true, // For atomic, skip the node if we'll instead render the atomic label as part of rendering the child. RegexNodeKind.Atomic when node.Child(0).Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop or RegexNodeKind.Alternate => true, // Don't skip anything else. _ => false, }; if (!skip) { string tag = node.Parent?.Kind switch { RegexNodeKind.ExpressionConditional when node.Parent.Child(0) == node => "Condition: ", RegexNodeKind.ExpressionConditional when node.Parent.Child(1) == node => "Matched: ", RegexNodeKind.ExpressionConditional when node.Parent.Child(2) == node => "Not Matched: ", RegexNodeKind.BackreferenceConditional when node.Parent.Child(0) == node => "Matched: ", RegexNodeKind.BackreferenceConditional when node.Parent.Child(1) == node => "Not Matched: ", _ => "", }; // Write out the line for the node. const char BulletPoint = '\u25CB'; writer.WriteLine($"{prefix}{new string(' ', depth * 4)}{BulletPoint} {tag}{DescribeNode(node, analysis)}"); } // Recur into each of its children. int childCount = node.ChildCount(); for (int i = 0; i < childCount; i++) { int childDepth = skip ? 
depth : depth + 1; DescribeExpression(writer, node.Child(i), prefix, analysis, childDepth); } } /// <summary>Gets a textual description of a loop's style and bounds.</summary> private static string DescribeLoop(RegexNode node, AnalysisResults analysis) { string style = node.Kind switch { _ when node.M == node.N => "exactly", RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloopatomic => "atomically", RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop => "greedily", RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy => "lazily", RegexNodeKind.Loop => analysis.IsAtomicByAncestor(node) ? "greedily and atomically" : "greedily", _ /* RegexNodeKind.Lazyloop */ => analysis.IsAtomicByAncestor(node) ? "lazily and atomically" : "lazily", }; string bounds = node.M == node.N ? $" {node.M} times" : (node.M, node.N) switch { (0, int.MaxValue) => " any number of times", (1, int.MaxValue) => " at least once", (2, int.MaxValue) => " at least twice", (_, int.MaxValue) => $" at least {node.M} times", (0, 1) => ", optionally", (0, _) => $" at most {node.N} times", _ => $" at least {node.M} and at most {node.N} times" }; return style + bounds; } private static FinishEmitScope EmitScope(IndentedTextWriter writer, string title, bool faux = false) => EmitBlock(writer, $"// {title}", faux: faux); private static FinishEmitScope EmitBlock(IndentedTextWriter writer, string? clause, bool faux = false) { if (clause is not null) { writer.WriteLine(clause); } writer.WriteLine(faux ? "//{" : "{"); writer.Indent++; return new FinishEmitScope(writer, faux); } private static void EmitAdd(IndentedTextWriter writer, string variable, int value) { if (value == 0) { return; } writer.WriteLine( value == 1 ? $"{variable}++;" : value == -1 ? $"{variable}--;" : value > 0 ? $"{variable} += {value};" : value < 0 && value > int.MinValue ? $"{variable} -= {-value};" : $"{variable} += {value.ToString(CultureInfo.InvariantCulture)};"); } private readonly struct FinishEmitScope : IDisposable { private readonly IndentedTextWriter _writer; private readonly bool _faux; public FinishEmitScope(IndentedTextWriter writer, bool faux) { _writer = writer; _faux = faux; } public void Dispose() { if (_writer is not null) { _writer.Indent--; _writer.WriteLine(_faux ? "//}" : "}"); } } } /// <summary>Bit flags indicating which additional helpers should be emitted into the regex class.</summary> [Flags] private enum RequiredHelperFunctions { /// <summary>No additional functions are required.</summary> None = 0b0, /// <summary>The IsWordChar helper is required.</summary> IsWordChar = 0b1, /// <summary>The IsBoundary helper is required.</summary> IsBoundary = 0b10, /// <summary>The IsECMABoundary helper is required.</summary> IsECMABoundary = 0b100 } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Buffers.Binary; using System.CodeDom.Compiler; using System.Collections; using System.Collections.Generic; using System.Collections.Immutable; using System.Diagnostics; using System.Globalization; using System.IO; using System.Linq; using System.Runtime.InteropServices; using System.Threading; using Microsoft.CodeAnalysis; using Microsoft.CodeAnalysis.CSharp; // NOTE: The logic in this file is largely a copy of logic in RegexCompiler, emitting C# instead of MSIL. // Most changes made to this file should be kept in sync, so far as bug fixes and relevant optimizations // are concerned. namespace System.Text.RegularExpressions.Generator { public partial class RegexGenerator { /// <summary>Code for a [GeneratedCode] attribute to put on the top-level generated members.</summary> private static readonly string s_generatedCodeAttribute = $"[global::System.CodeDom.Compiler.GeneratedCodeAttribute(\"{typeof(RegexGenerator).Assembly.GetName().Name}\", \"{typeof(RegexGenerator).Assembly.GetName().Version}\")]"; /// <summary>Header comments and usings to include at the top of every generated file.</summary> private static readonly string[] s_headers = new string[] { "// <auto-generated/>", "#nullable enable", "#pragma warning disable CS0162 // Unreachable code", "#pragma warning disable CS0164 // Unreferenced label", "#pragma warning disable CS0219 // Variable assigned but never used", "", }; /// <summary>Generates the code for one regular expression class.</summary> private static (string, ImmutableArray<Diagnostic>) EmitRegexType(RegexType regexClass, bool allowUnsafe) { var sb = new StringBuilder(1024); var writer = new IndentedTextWriter(new StringWriter(sb)); // Emit the namespace if (!string.IsNullOrWhiteSpace(regexClass.Namespace)) { writer.WriteLine($"namespace {regexClass.Namespace}"); writer.WriteLine("{"); writer.Indent++; } // Emit containing types RegexType? parent = regexClass.ParentClass; var parentClasses = new Stack<string>(); while (parent is not null) { parentClasses.Push($"partial {parent.Keyword} {parent.Name}"); parent = parent.ParentClass; } while (parentClasses.Count != 0) { writer.WriteLine($"{parentClasses.Pop()}"); writer.WriteLine("{"); writer.Indent++; } // Emit the direct parent type writer.WriteLine($"partial {regexClass.Keyword} {regexClass.Name}"); writer.WriteLine("{"); writer.Indent++; // Generate a name to describe the regex instance. This includes the method name // the user provided and a non-randomized (for determinism) hash of it to try to make // the name that much harder to predict. Debug.Assert(regexClass.Method is not null); string generatedName = $"GeneratedRegex_{regexClass.Method.MethodName}_"; generatedName += ComputeStringHash(generatedName).ToString("X"); // Generate the regex type ImmutableArray<Diagnostic> diagnostics = EmitRegexMethod(writer, regexClass.Method, generatedName, allowUnsafe); while (writer.Indent != 0) { writer.Indent--; writer.WriteLine("}"); } writer.Flush(); return (sb.ToString(), diagnostics); // FNV-1a hash function. The actual algorithm used doesn't matter; just something simple // to create a deterministic, pseudo-random value that's based on input text. 
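            // Illustrative sketch (hypothetical input, comment only): the hash below evolves as
            //     hash = 2166136261; foreach (char c in s) hash = (c ^ hash) * 16777619;
            // so a given prefix such as "GeneratedRegex_MyMethod_" always yields the same hex suffix from
            // build to build, keeping the generated type name deterministic.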
static uint ComputeStringHash(string s) { uint hashCode = 2166136261; foreach (char c in s) { hashCode = (c ^ hashCode) * 16777619; } return hashCode; } } /// <summary>Gets whether a given regular expression method is supported by the code generator.</summary> private static bool SupportsCodeGeneration(RegexMethod rm, out string? reason) { RegexNode root = rm.Tree.Root; if (!root.SupportsCompilation(out reason)) { return false; } if (ExceedsMaxDepthForSimpleCodeGeneration(root, allowedDepth: 40)) { // Deep RegexNode trees can result in emitting C# code that exceeds C# compiler // limitations, leading to "CS8078: An expression is too long or complex to compile". // Place an artificial limit on max tree depth in order to mitigate such issues. // The allowed depth can be tweaked as needed;its exceedingly rare to find // expressions with such deep trees. reason = "the regex will result in code that may exceed C# compiler limits"; return false; } return true; static bool ExceedsMaxDepthForSimpleCodeGeneration(RegexNode node, int allowedDepth) { if (allowedDepth <= 0) { return true; } int childCount = node.ChildCount(); for (int i = 0; i < childCount; i++) { if (ExceedsMaxDepthForSimpleCodeGeneration(node.Child(i), allowedDepth - 1)) { return true; } } return false; } } /// <summary>Generates the code for a regular expression method.</summary> private static ImmutableArray<Diagnostic> EmitRegexMethod(IndentedTextWriter writer, RegexMethod rm, string id, bool allowUnsafe) { string patternExpression = Literal(rm.Pattern); string optionsExpression = Literal(rm.Options); string timeoutExpression = rm.MatchTimeout == Timeout.Infinite ? "global::System.Threading.Timeout.InfiniteTimeSpan" : $"global::System.TimeSpan.FromMilliseconds({rm.MatchTimeout.ToString(CultureInfo.InvariantCulture)})"; writer.WriteLine(s_generatedCodeAttribute); writer.WriteLine($"{rm.Modifiers} global::System.Text.RegularExpressions.Regex {rm.MethodName}() => {id}.Instance;"); writer.WriteLine(); writer.WriteLine(s_generatedCodeAttribute); writer.WriteLine("[global::System.ComponentModel.EditorBrowsable(global::System.ComponentModel.EditorBrowsableState.Never)]"); writer.WriteLine($"{(writer.Indent != 0 ? "private" : "internal")} sealed class {id} : global::System.Text.RegularExpressions.Regex"); writer.WriteLine("{"); writer.Write(" public static global::System.Text.RegularExpressions.Regex Instance { get; } = "); // If we can't support custom generation for this regex, spit out a Regex constructor call. if (!SupportsCodeGeneration(rm, out string? 
reason)) { writer.WriteLine(); writer.WriteLine($"// Cannot generate Regex-derived implementation because {reason}."); writer.WriteLine($"new global::System.Text.RegularExpressions.Regex({patternExpression}, {optionsExpression}, {timeoutExpression});"); writer.WriteLine("}"); return ImmutableArray.Create(Diagnostic.Create(DiagnosticDescriptors.LimitedSourceGeneration, rm.MethodSyntax.GetLocation())); } AnalysisResults analysis = RegexTreeAnalyzer.Analyze(rm.Tree); writer.WriteLine($"new {id}();"); writer.WriteLine(); writer.WriteLine($" private {id}()"); writer.WriteLine($" {{"); writer.WriteLine($" base.pattern = {patternExpression};"); writer.WriteLine($" base.roptions = {optionsExpression};"); writer.WriteLine($" base.internalMatchTimeout = {timeoutExpression};"); writer.WriteLine($" base.factory = new RunnerFactory();"); if (rm.Tree.CaptureNumberSparseMapping is not null) { writer.Write(" base.Caps = new global::System.Collections.Hashtable {"); AppendHashtableContents(writer, rm.Tree.CaptureNumberSparseMapping); writer.WriteLine(" };"); } if (rm.Tree.CaptureNameToNumberMapping is not null) { writer.Write(" base.CapNames = new global::System.Collections.Hashtable {"); AppendHashtableContents(writer, rm.Tree.CaptureNameToNumberMapping); writer.WriteLine(" };"); } if (rm.Tree.CaptureNames is not null) { writer.Write(" base.capslist = new string[] {"); string separator = ""; foreach (string s in rm.Tree.CaptureNames) { writer.Write(separator); writer.Write(Literal(s)); separator = ", "; } writer.WriteLine(" };"); } writer.WriteLine($" base.capsize = {rm.Tree.CaptureCount};"); writer.WriteLine($" }}"); writer.WriteLine(" "); writer.WriteLine($" private sealed class RunnerFactory : global::System.Text.RegularExpressions.RegexRunnerFactory"); writer.WriteLine($" {{"); writer.WriteLine($" protected override global::System.Text.RegularExpressions.RegexRunner CreateInstance() => new Runner();"); writer.WriteLine(); writer.WriteLine($" private sealed class Runner : global::System.Text.RegularExpressions.RegexRunner"); writer.WriteLine($" {{"); // Main implementation methods writer.WriteLine(" // Description:"); DescribeExpression(writer, rm.Tree.Root.Child(0), " // ", analysis); // skip implicit root capture writer.WriteLine(); writer.WriteLine($" protected override void Scan(global::System.ReadOnlySpan<char> text)"); writer.WriteLine($" {{"); writer.Indent += 4; EmitScan(writer, rm, id); writer.Indent -= 4; writer.WriteLine($" }}"); writer.WriteLine(); writer.WriteLine($" private bool TryFindNextPossibleStartingPosition(global::System.ReadOnlySpan<char> inputSpan)"); writer.WriteLine($" {{"); writer.Indent += 4; RequiredHelperFunctions requiredHelpers = EmitTryFindNextPossibleStartingPosition(writer, rm, id); writer.Indent -= 4; writer.WriteLine($" }}"); writer.WriteLine(); if (allowUnsafe) { writer.WriteLine($" [global::System.Runtime.CompilerServices.SkipLocalsInit]"); } writer.WriteLine($" private bool TryMatchAtCurrentPosition(global::System.ReadOnlySpan<char> inputSpan)"); writer.WriteLine($" {{"); writer.Indent += 4; requiredHelpers |= EmitTryMatchAtCurrentPosition(writer, rm, id, analysis); writer.Indent -= 4; writer.WriteLine($" }}"); if ((requiredHelpers & RequiredHelperFunctions.IsWordChar) != 0) { writer.WriteLine(); writer.WriteLine($" /// <summary>Determines whether the character is part of the [\\w] set.</summary>"); writer.WriteLine($" [global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)]"); 
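                // The helper emitted below answers [\w] membership for ASCII via a 16-byte bitmap: for ch < 128 it tests
                // bit (ch & 0x7) of ascii[ch >> 3]; e.g. 'A' (0x41) maps to bit 1 of ascii[8] == 0xFE, which is set.
                // Non-ASCII characters fall back to a Unicode general category check. (The worked example is
                // illustrative arithmetic only, not taken from the source.)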
writer.WriteLine($" private static bool IsWordChar(char ch)"); writer.WriteLine($" {{"); writer.WriteLine($" global::System.ReadOnlySpan<byte> ascii = new byte[]"); writer.WriteLine($" {{"); writer.WriteLine($" 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x03,"); writer.WriteLine($" 0xFE, 0xFF, 0xFF, 0x87, 0xFE, 0xFF, 0xFF, 0x07"); writer.WriteLine($" }};"); writer.WriteLine(); writer.WriteLine($" int chDiv8 = ch >> 3;"); writer.WriteLine($" return (uint)chDiv8 < (uint)ascii.Length ?"); writer.WriteLine($" (ascii[chDiv8] & (1 << (ch & 0x7))) != 0 :"); writer.WriteLine($" global::System.Globalization.CharUnicodeInfo.GetUnicodeCategory(ch) switch"); writer.WriteLine($" {{"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.UppercaseLetter or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.LowercaseLetter or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.TitlecaseLetter or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.ModifierLetter or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.OtherLetter or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.NonSpacingMark or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.DecimalDigitNumber or"); writer.WriteLine($" global::System.Globalization.UnicodeCategory.ConnectorPunctuation => true,"); writer.WriteLine($" _ => false,"); writer.WriteLine($" }};"); writer.WriteLine($" }}"); } if ((requiredHelpers & RequiredHelperFunctions.IsBoundary) != 0) { writer.WriteLine(); writer.WriteLine($" /// <summary>Determines whether the character at the specified index is a boundary.</summary>"); writer.WriteLine($" [global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)]"); writer.WriteLine($" private static bool IsBoundary(global::System.ReadOnlySpan<char> inputSpan, int index)"); writer.WriteLine($" {{"); writer.WriteLine($" int indexM1 = index - 1;"); writer.WriteLine($" return ((uint)indexM1 < (uint)inputSpan.Length && IsBoundaryWordChar(inputSpan[indexM1])) !="); writer.WriteLine($" ((uint)index < (uint)inputSpan.Length && IsBoundaryWordChar(inputSpan[index]));"); writer.WriteLine(); writer.WriteLine($" static bool IsBoundaryWordChar(char ch) =>"); writer.WriteLine($" IsWordChar(ch) || (ch == '\\u200C' | ch == '\\u200D');"); writer.WriteLine($" }}"); } if ((requiredHelpers & RequiredHelperFunctions.IsECMABoundary) != 0) { writer.WriteLine(); writer.WriteLine($" /// <summary>Determines whether the character at the specified index is a boundary.</summary>"); writer.WriteLine($" [global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)]"); writer.WriteLine($" private static bool IsECMABoundary(global::System.ReadOnlySpan<char> inputSpan, int index)"); writer.WriteLine($" {{"); writer.WriteLine($" int indexM1 = index - 1;"); writer.WriteLine($" return ((uint)indexM1 < (uint)inputSpan.Length && IsECMAWordChar(inputSpan[indexM1])) !="); writer.WriteLine($" ((uint)index < (uint)inputSpan.Length && IsECMAWordChar(inputSpan[index]));"); writer.WriteLine(); writer.WriteLine($" static bool IsECMAWordChar(char ch) =>"); writer.WriteLine($" ((((uint)ch - 'A') & ~0x20) < 26) || // ASCII letter"); writer.WriteLine($" (((uint)ch - '0') < 10) || // digit"); writer.WriteLine($" ch == '_' || // underscore"); writer.WriteLine($" ch == '\\u0130'; // latin capital letter I with dot above"); 
writer.WriteLine($" }}"); } writer.WriteLine($" }}"); writer.WriteLine($" }}"); writer.WriteLine("}"); return ImmutableArray<Diagnostic>.Empty; static void AppendHashtableContents(IndentedTextWriter writer, Hashtable ht) { IDictionaryEnumerator en = ht.GetEnumerator(); string separator = ""; while (en.MoveNext()) { writer.Write(separator); separator = ", "; writer.Write(" { "); if (en.Key is int key) { writer.Write(key); } else { writer.Write($"\"{en.Key}\""); } writer.Write($", {en.Value} }} "); } } } /// <summary>Emits the body of the Scan method override.</summary> private static void EmitScan(IndentedTextWriter writer, RegexMethod rm, string id) { using (EmitBlock(writer, "while (TryFindNextPossibleStartingPosition(text))")) { if (rm.MatchTimeout != Timeout.Infinite) { writer.WriteLine("base.CheckTimeout();"); writer.WriteLine(); } writer.WriteLine("// If we find a match on the current position, or we have reached the end of the input, we are done."); using (EmitBlock(writer, "if (TryMatchAtCurrentPosition(text) || base.runtextpos == text.Length)")) { writer.WriteLine("return;"); } writer.WriteLine(); writer.WriteLine("base.runtextpos++;"); } } /// <summary>Emits the body of the TryFindNextPossibleStartingPosition.</summary> private static RequiredHelperFunctions EmitTryFindNextPossibleStartingPosition(IndentedTextWriter writer, RegexMethod rm, string id) { RegexOptions options = (RegexOptions)rm.Options; RegexTree regexTree = rm.Tree; bool hasTextInfo = false; RequiredHelperFunctions requiredHelpers = RequiredHelperFunctions.None; // In some cases, we need to emit declarations at the beginning of the method, but we only discover we need them later. // To handle that, we build up a collection of all the declarations to include, track where they should be inserted, // and then insert them at that position once everything else has been output. var additionalDeclarations = new HashSet<string>(); // Emit locals initialization writer.WriteLine("int pos = base.runtextpos;"); writer.Flush(); int additionalDeclarationsPosition = ((StringWriter)writer.InnerWriter).GetStringBuilder().Length; int additionalDeclarationsIndent = writer.Indent; writer.WriteLine(); // Generate length check. If the input isn't long enough to possibly match, fail quickly. // It's rare for min required length to be 0, so we don't bother special-casing the check, // especially since we want the "return false" code regardless. int minRequiredLength = rm.Tree.FindOptimizations.MinRequiredLength; Debug.Assert(minRequiredLength >= 0); string clause = minRequiredLength switch { 0 => "if (pos <= inputSpan.Length)", 1 => "if (pos < inputSpan.Length)", _ => $"if (pos < inputSpan.Length - {minRequiredLength - 1})" }; using (EmitBlock(writer, clause)) { // Emit any anchors. if (!EmitAnchors()) { // Either anchors weren't specified, or they don't completely root all matches to a specific location. // If whatever search operation we need to perform entails case-insensitive operations // that weren't already handled via creation of sets, we need to get an store the // TextInfo object to use (unless RegexOptions.CultureInvariant was specified). EmitTextInfo(writer, ref hasTextInfo, rm); // Emit the code for whatever find mode has been determined. 
switch (regexTree.FindOptimizations.FindMode) { case FindNextStartingPositionMode.LeadingPrefix_LeftToRight_CaseSensitive: Debug.Assert(!string.IsNullOrEmpty(regexTree.FindOptimizations.LeadingCaseSensitivePrefix)); EmitIndexOf(regexTree.FindOptimizations.LeadingCaseSensitivePrefix); break; case FindNextStartingPositionMode.FixedSets_LeftToRight_CaseSensitive: case FindNextStartingPositionMode.FixedSets_LeftToRight_CaseInsensitive: case FindNextStartingPositionMode.LeadingSet_LeftToRight_CaseSensitive: case FindNextStartingPositionMode.LeadingSet_LeftToRight_CaseInsensitive: Debug.Assert(regexTree.FindOptimizations.FixedDistanceSets is { Count: > 0 }); EmitFixedSet(); break; case FindNextStartingPositionMode.LiteralAfterLoop_LeftToRight_CaseSensitive: Debug.Assert(regexTree.FindOptimizations.LiteralAfterLoop is not null); EmitLiteralAfterAtomicLoop(); break; default: Debug.Fail($"Unexpected mode: {regexTree.FindOptimizations.FindMode}"); goto case FindNextStartingPositionMode.NoSearch; case FindNextStartingPositionMode.NoSearch: writer.WriteLine("return true;"); break; } } } writer.WriteLine(); const string NoStartingPositionFound = "NoStartingPositionFound"; writer.WriteLine("// No starting position found"); writer.WriteLine($"{NoStartingPositionFound}:"); writer.WriteLine("base.runtextpos = inputSpan.Length;"); writer.WriteLine("return false;"); // We're done. Patch up any additional declarations. ReplaceAdditionalDeclarations(writer, additionalDeclarations, additionalDeclarationsPosition, additionalDeclarationsIndent); return requiredHelpers; // Emit a goto for the specified label. void Goto(string label) => writer.WriteLine($"goto {label};"); // Emits any anchors. Returns true if the anchor roots any match to a specific location and thus no further // searching is required; otherwise, false. bool EmitAnchors() { // Anchors that fully implement TryFindNextPossibleStartingPosition, with a check that leads to immediate success or failure determination. switch (regexTree.FindOptimizations.FindMode) { case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_Beginning: writer.WriteLine("// Beginning \\A anchor"); using (EmitBlock(writer, "if (pos > 0)")) { Goto(NoStartingPositionFound); } writer.WriteLine("return true;"); return true; case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_Start: writer.WriteLine("// Start \\G anchor"); using (EmitBlock(writer, "if (pos > base.runtextstart)")) { Goto(NoStartingPositionFound); } writer.WriteLine("return true;"); return true; case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_EndZ: writer.WriteLine("// Leading end \\Z anchor"); using (EmitBlock(writer, "if (pos < inputSpan.Length - 1)")) { writer.WriteLine("base.runtextpos = inputSpan.Length - 1;"); } writer.WriteLine("return true;"); return true; case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_End: writer.WriteLine("// Leading end \\z anchor"); using (EmitBlock(writer, "if (pos < inputSpan.Length)")) { writer.WriteLine("base.runtextpos = inputSpan.Length;"); } writer.WriteLine("return true;"); return true; case FindNextStartingPositionMode.TrailingAnchor_FixedLength_LeftToRight_EndZ: // Jump to the end, minus the min required length, which in this case is actually the fixed length, minus 1 (for a possible ending \n). 
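                        // Worked example (hypothetical pattern, for illustration only): if this mode is chosen for a
                        // fixed-length pattern such as abc\Z, the minimum required length is 3, so the emitted check
                        // boosts pos to inputSpan.Length - 4, leaving room for the three characters plus a possible
                        // trailing '\n'.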
writer.WriteLine("// Trailing end \\Z anchor with fixed-length match"); using (EmitBlock(writer, $"if (pos < inputSpan.Length - {regexTree.FindOptimizations.MinRequiredLength + 1})")) { writer.WriteLine($"base.runtextpos = inputSpan.Length - {regexTree.FindOptimizations.MinRequiredLength + 1};"); } writer.WriteLine("return true;"); return true; case FindNextStartingPositionMode.TrailingAnchor_FixedLength_LeftToRight_End: // Jump to the end, minus the min required length, which in this case is actually the fixed length. writer.WriteLine("// Trailing end \\z anchor with fixed-length match"); using (EmitBlock(writer, $"if (pos < inputSpan.Length - {regexTree.FindOptimizations.MinRequiredLength})")) { writer.WriteLine($"base.runtextpos = inputSpan.Length - {regexTree.FindOptimizations.MinRequiredLength};"); } writer.WriteLine("return true;"); return true; } // Now handle anchors that boost the position but may not determine immediate success or failure. switch (regexTree.FindOptimizations.LeadingAnchor) { case RegexNodeKind.Bol: // Optimize the handling of a Beginning-Of-Line (BOL) anchor. BOL is special, in that unlike // other anchors like Beginning, there are potentially multiple places a BOL can match. So unlike // the other anchors, which all skip all subsequent processing if found, with BOL we just use it // to boost our position to the next line, and then continue normally with any searches. writer.WriteLine("// Beginning-of-line anchor"); using (EmitBlock(writer, "if (pos > 0 && inputSpan[pos - 1] != '\\n')")) { writer.WriteLine("int newlinePos = global::System.MemoryExtensions.IndexOf(inputSpan.Slice(pos), '\\n');"); using (EmitBlock(writer, "if ((uint)newlinePos > inputSpan.Length - pos - 1)")) { Goto(NoStartingPositionFound); } writer.WriteLine("pos = newlinePos + pos + 1;"); } writer.WriteLine(); break; } switch (regexTree.FindOptimizations.TrailingAnchor) { case RegexNodeKind.End when regexTree.FindOptimizations.MaxPossibleLength is int maxLength: writer.WriteLine("// End \\z anchor with maximum-length match"); using (EmitBlock(writer, $"if (pos < inputSpan.Length - {maxLength})")) { writer.WriteLine($"pos = inputSpan.Length - {maxLength};"); } writer.WriteLine(); break; case RegexNodeKind.EndZ when regexTree.FindOptimizations.MaxPossibleLength is int maxLength: writer.WriteLine("// End \\Z anchor with maximum-length match"); using (EmitBlock(writer, $"if (pos < inputSpan.Length - {maxLength + 1})")) { writer.WriteLine($"pos = inputSpan.Length - {maxLength + 1};"); } writer.WriteLine(); break; } return false; } // Emits a case-sensitive prefix search for a string at the beginning of the pattern. void EmitIndexOf(string prefix) { writer.WriteLine($"int i = global::System.MemoryExtensions.IndexOf(inputSpan.Slice(pos), {Literal(prefix)});"); writer.WriteLine("if (i >= 0)"); writer.WriteLine("{"); writer.WriteLine(" base.runtextpos = pos + i;"); writer.WriteLine(" return true;"); writer.WriteLine("}"); } // Emits a search for a set at a fixed position from the start of the pattern, // and potentially other sets at other fixed positions in the pattern. void EmitFixedSet() { List<(char[]? Chars, string Set, int Distance, bool CaseInsensitive)>? sets = regexTree.FindOptimizations.FixedDistanceSets; (char[]? Chars, string Set, int Distance, bool CaseInsensitive) primarySet = sets![0]; const int MaxSets = 4; int setsToUse = Math.Min(sets.Count, MaxSets); // If we can use IndexOf{Any}, try to accelerate the skip loop via vectorization to match the first prefix. 
// We can use it if this is a case-sensitive class with a small number of characters in the class. int setIndex = 0; bool canUseIndexOf = !primarySet.CaseInsensitive && primarySet.Chars is not null; bool needLoop = !canUseIndexOf || setsToUse > 1; FinishEmitScope loopBlock = default; if (needLoop) { writer.WriteLine("global::System.ReadOnlySpan<char> span = inputSpan.Slice(pos);"); string upperBound = "span.Length" + (setsToUse > 1 || primarySet.Distance != 0 ? $" - {minRequiredLength - 1}" : ""); loopBlock = EmitBlock(writer, $"for (int i = 0; i < {upperBound}; i++)"); } if (canUseIndexOf) { string span = needLoop ? "span" : "inputSpan.Slice(pos)"; span = (needLoop, primarySet.Distance) switch { (false, 0) => span, (true, 0) => $"{span}.Slice(i)", (false, _) => $"{span}.Slice({primarySet.Distance})", (true, _) => $"{span}.Slice(i + {primarySet.Distance})", }; string indexOf = primarySet.Chars!.Length switch { 1 => $"global::System.MemoryExtensions.IndexOf({span}, {Literal(primarySet.Chars[0])})", 2 => $"global::System.MemoryExtensions.IndexOfAny({span}, {Literal(primarySet.Chars[0])}, {Literal(primarySet.Chars[1])})", 3 => $"global::System.MemoryExtensions.IndexOfAny({span}, {Literal(primarySet.Chars[0])}, {Literal(primarySet.Chars[1])}, {Literal(primarySet.Chars[2])})", _ => $"global::System.MemoryExtensions.IndexOfAny({span}, {Literal(new string(primarySet.Chars))})", }; if (needLoop) { writer.WriteLine($"int indexOfPos = {indexOf};"); using (EmitBlock(writer, "if (indexOfPos < 0)")) { Goto(NoStartingPositionFound); } writer.WriteLine("i += indexOfPos;"); writer.WriteLine(); if (setsToUse > 1) { using (EmitBlock(writer, $"if (i >= span.Length - {minRequiredLength - 1})")) { Goto(NoStartingPositionFound); } writer.WriteLine(); } } else { writer.WriteLine($"int i = {indexOf};"); using (EmitBlock(writer, "if (i >= 0)")) { writer.WriteLine("base.runtextpos = pos + i;"); writer.WriteLine("return true;"); } } setIndex = 1; } if (needLoop) { Debug.Assert(setIndex == 0 || setIndex == 1); bool hasCharClassConditions = false; if (setIndex < setsToUse) { // if (CharInClass(textSpan[i + charClassIndex], prefix[0], "...") && // ...) Debug.Assert(needLoop); int start = setIndex; for (; setIndex < setsToUse; setIndex++) { string spanIndex = $"span[i{(sets[setIndex].Distance > 0 ? $" + {sets[setIndex].Distance}" : "")}]"; string charInClassExpr = MatchCharacterClass(hasTextInfo, options, spanIndex, sets[setIndex].Set, sets[setIndex].CaseInsensitive, negate: false, additionalDeclarations, ref requiredHelpers); if (setIndex == start) { writer.Write($"if ({charInClassExpr}"); } else { writer.WriteLine(" &&"); writer.Write($" {charInClassExpr}"); } } writer.WriteLine(")"); hasCharClassConditions = true; } using (hasCharClassConditions ? EmitBlock(writer, null) : default) { writer.WriteLine("base.runtextpos = pos + i;"); writer.WriteLine("return true;"); } } loopBlock.Dispose(); } // Emits a search for a literal following a leading atomic single-character loop. void EmitLiteralAfterAtomicLoop() { Debug.Assert(regexTree.FindOptimizations.LiteralAfterLoop is not null); (RegexNode LoopNode, (char Char, string? String, char[]? 
Chars) Literal) target = regexTree.FindOptimizations.LiteralAfterLoop.Value; Debug.Assert(target.LoopNode.Kind is RegexNodeKind.Setloop or RegexNodeKind.Setlazy or RegexNodeKind.Setloopatomic); Debug.Assert(target.LoopNode.N == int.MaxValue); using (EmitBlock(writer, "while (true)")) { writer.WriteLine($"global::System.ReadOnlySpan<char> slice = inputSpan.Slice(pos);"); writer.WriteLine(); // Find the literal. If we can't find it, we're done searching. writer.Write("int i = global::System.MemoryExtensions."); writer.WriteLine( target.Literal.String is string literalString ? $"IndexOf(slice, {Literal(literalString)});" : target.Literal.Chars is not char[] literalChars ? $"IndexOf(slice, {Literal(target.Literal.Char)});" : literalChars.Length switch { 2 => $"IndexOfAny(slice, {Literal(literalChars[0])}, {Literal(literalChars[1])});", 3 => $"IndexOfAny(slice, {Literal(literalChars[0])}, {Literal(literalChars[1])}, {Literal(literalChars[2])});", _ => $"IndexOfAny(slice, {Literal(new string(literalChars))});", }); using (EmitBlock(writer, $"if (i < 0)")) { writer.WriteLine("break;"); } writer.WriteLine(); // We found the literal. Walk backwards from it finding as many matches as we can against the loop. writer.WriteLine("int prev = i;"); writer.WriteLine($"while ((uint)--prev < (uint)slice.Length && {MatchCharacterClass(hasTextInfo, options, "slice[prev]", target.LoopNode.Str!, caseInsensitive: false, negate: false, additionalDeclarations, ref requiredHelpers)});"); if (target.LoopNode.M > 0) { // If we found fewer than needed, loop around to try again. The loop doesn't overlap with the literal, // so we can start from after the last place the literal matched. writer.WriteLine($"if ((i - prev - 1) < {target.LoopNode.M})"); writer.WriteLine("{"); writer.WriteLine(" pos += i + 1;"); writer.WriteLine(" continue;"); writer.WriteLine("}"); } writer.WriteLine(); // We have a winner. The starting position is just after the last position that failed to match the loop. // TODO: It'd be nice to be able to communicate i as a place the matching engine can start matching // after the loop, so that it doesn't need to re-match the loop. writer.WriteLine("base.runtextpos = pos + prev + 1;"); writer.WriteLine("return true;"); } } // If a TextInfo is needed to perform ToLower operations, emits a local initialized to the TextInfo to use. static void EmitTextInfo(IndentedTextWriter writer, ref bool hasTextInfo, RegexMethod rm) { // Emit local to store current culture if needed if ((rm.Options & RegexOptions.CultureInvariant) == 0) { bool needsCulture = rm.Tree.FindOptimizations.FindMode switch { FindNextStartingPositionMode.FixedLiteral_LeftToRight_CaseInsensitive or FindNextStartingPositionMode.FixedSets_LeftToRight_CaseInsensitive or FindNextStartingPositionMode.LeadingSet_LeftToRight_CaseInsensitive => true, _ when rm.Tree.FindOptimizations.FixedDistanceSets is List<(char[]? 
Chars, string Set, int Distance, bool CaseInsensitive)> sets => sets.Exists(set => set.CaseInsensitive), _ => false, }; if (needsCulture) { hasTextInfo = true; writer.WriteLine("global::System.Globalization.TextInfo textInfo = global::System.Globalization.CultureInfo.CurrentCulture.TextInfo;"); } } } } /// <summary>Emits the body of the TryMatchAtCurrentPosition.</summary> private static RequiredHelperFunctions EmitTryMatchAtCurrentPosition(IndentedTextWriter writer, RegexMethod rm, string id, AnalysisResults analysis) { // In .NET Framework and up through .NET Core 3.1, the code generated for RegexOptions.Compiled was effectively an unrolled // version of what RegexInterpreter would process. The RegexNode tree would be turned into a series of opcodes via // RegexWriter; the interpreter would then sit in a loop processing those opcodes, and the RegexCompiler iterated through the // opcodes generating code for each equivalent to what the interpreter would do albeit with some decisions made at compile-time // rather than at run-time. This approach, however, lead to complicated code that wasn't pay-for-play (e.g. a big backtracking // jump table that all compilations went through even if there was no backtracking), that didn't factor in the shape of the // tree (e.g. it's difficult to add optimizations based on interactions between nodes in the graph), and that didn't read well // when decompiled from IL to C# or when directly emitted as C# as part of a source generator. // // This implementation is instead based on directly walking the RegexNode tree and outputting code for each node in the graph. // A dedicated for each kind of RegexNode emits the code necessary to handle that node's processing, including recursively // calling the relevant function for any of its children nodes. Backtracking is handled not via a giant jump table, but instead // by emitting direct jumps to each backtracking construct. This is achieved by having all match failures jump to a "done" // label that can be changed by a previous emitter, e.g. before EmitLoop returns, it ensures that "doneLabel" is set to the // label that code should jump back to when backtracking. That way, a subsequent EmitXx function doesn't need to know exactly // where to jump: it simply always jumps to "doneLabel" on match failure, and "doneLabel" is always configured to point to // the right location. In an expression without backtracking, or before any backtracking constructs have been encountered, // "doneLabel" is simply the final return location from the TryMatchAtCurrentPosition method that will undo any captures and exit, signaling to // the calling scan loop that nothing was matched. // Arbitrary limit for unrolling vs creating a loop. We want to balance size in the generated // code with other costs, like the (small) overhead of slicing to create the temp span to iterate. const int MaxUnrollSize = 16; RegexOptions options = (RegexOptions)rm.Options; RegexTree regexTree = rm.Tree; RequiredHelperFunctions requiredHelpers = RequiredHelperFunctions.None; // Helper to define names. Names start unadorned, but as soon as there's repetition, // they begin to have a numbered suffix. var usedNames = new Dictionary<string, int>(); // Every RegexTree is rooted in the implicit Capture for the whole expression. // Skip the Capture node. We handle the implicit root capture specially. 
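            // (Illustration with an assumed pattern: for a(b)c the parsed tree is roughly
            // Capture(0, Concatenate(One('a'), Capture(1, One('b')), One('c'))); the code below unwraps that
            // outer Capture(0) and emits code for its single child.)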
RegexNode node = regexTree.Root; Debug.Assert(node.Kind == RegexNodeKind.Capture, "Every generated tree should begin with a capture node"); Debug.Assert(node.ChildCount() == 1, "Capture nodes should have one child"); node = node.Child(0); // In some limited cases, TryFindNextPossibleStartingPosition will only return true if it successfully matched the whole expression. // We can special case these to do essentially nothing in TryMatchAtCurrentPosition other than emit the capture. switch (node.Kind) { case RegexNodeKind.Multi or RegexNodeKind.Notone or RegexNodeKind.One or RegexNodeKind.Set when !IsCaseInsensitive(node): // This is the case for single and multiple characters, though the whole thing is only guaranteed // to have been validated in TryFindNextPossibleStartingPosition when doing case-sensitive comparison. writer.WriteLine($"int start = base.runtextpos;"); writer.WriteLine($"int end = start + {(node.Kind == RegexNodeKind.Multi ? node.Str!.Length : 1)};"); writer.WriteLine("base.Capture(0, start, end);"); writer.WriteLine("base.runtextpos = end;"); writer.WriteLine("return true;"); return requiredHelpers; case RegexNodeKind.Empty: // This case isn't common in production, but it's very common when first getting started with the // source generator and seeing what happens as you add more to expressions. When approaching // it from a learning perspective, this is very common, as it's the empty string you start with. writer.WriteLine("base.Capture(0, base.runtextpos, base.runtextpos);"); writer.WriteLine("return true;"); return requiredHelpers; } // In some cases, we need to emit declarations at the beginning of the method, but we only discover we need them later. // To handle that, we build up a collection of all the declarations to include, track where they should be inserted, // and then insert them at that position once everything else has been output. var additionalDeclarations = new HashSet<string>(); var additionalLocalFunctions = new Dictionary<string, string[]>(); // Declare some locals. string sliceSpan = "slice"; writer.WriteLine("int pos = base.runtextpos;"); writer.WriteLine($"int original_pos = pos;"); bool hasTimeout = EmitLoopTimeoutCounterIfNeeded(writer, rm); bool hasTextInfo = EmitInitializeCultureForTryMatchAtCurrentPositionIfNecessary(writer, rm, analysis); writer.Flush(); int additionalDeclarationsPosition = ((StringWriter)writer.InnerWriter).GetStringBuilder().Length; int additionalDeclarationsIndent = writer.Indent; // The implementation tries to use const indexes into the span wherever possible, which we can do // for all fixed-length constructs. In such cases (e.g. single chars, repeaters, strings, etc.) // we know at any point in the regex exactly how far into it we are, and we can use that to index // into the span created at the beginning of the routine to begin at exactly where we're starting // in the input. When we encounter a variable-length construct, we transfer the static value to // pos, slicing the inputSpan appropriately, and then zero out the static position. int sliceStaticPos = 0; SliceInputSpan(writer, defineLocal: true); writer.WriteLine(); // doneLabel starts out as the top-level label for the whole expression failing to match. However, // it may be changed by the processing of a node to point to whereever subsequent match failures // should jump to, in support of backtracking or other constructs. 
For example, before emitting // the code for a branch N, an alternation will set the the doneLabel to point to the label for // processing the next branch N+1: that way, any failures in the branch N's processing will // implicitly end up jumping to the right location without needing to know in what context it's used. string doneLabel = ReserveName("NoMatch"); string topLevelDoneLabel = doneLabel; // Check whether there are captures anywhere in the expression. If there isn't, we can skip all // the boilerplate logic around uncapturing, as there won't be anything to uncapture. bool expressionHasCaptures = analysis.MayContainCapture(node); // Emit the code for all nodes in the tree. EmitNode(node); // If we fall through to this place in the code, we've successfully matched the expression. writer.WriteLine(); writer.WriteLine("// The input matched."); if (sliceStaticPos > 0) { EmitAdd(writer, "pos", sliceStaticPos); // TransferSliceStaticPosToPos would also slice, which isn't needed here } writer.WriteLine("base.runtextpos = pos;"); writer.WriteLine("base.Capture(0, original_pos, pos);"); writer.WriteLine("return true;"); // We're done with the match. // Patch up any additional declarations. ReplaceAdditionalDeclarations(writer, additionalDeclarations, additionalDeclarationsPosition, additionalDeclarationsIndent); // And emit any required helpers. if (additionalLocalFunctions.Count != 0) { foreach (KeyValuePair<string, string[]> localFunctions in additionalLocalFunctions.OrderBy(k => k.Key)) { writer.WriteLine(); foreach (string line in localFunctions.Value) { writer.WriteLine(line); } } } return requiredHelpers; // Helper to create a name guaranteed to be unique within the function. string ReserveName(string prefix) { usedNames.TryGetValue(prefix, out int count); usedNames[prefix] = count + 1; return count == 0 ? prefix : $"{prefix}{count}"; } // Helper to emit a label. As of C# 10, labels aren't statements of their own and need to adorn a following statement; // if a label appears just before a closing brace, then, it's a compilation error. To avoid issues there, this by // default implements a blank statement (a semicolon) after each label, but individual uses can opt-out of the semicolon // when it's known the label will always be followed by a statement. void MarkLabel(string label, bool emitSemicolon = true) => writer.WriteLine($"{label}:{(emitSemicolon ? ";" : "")}"); // Emits a goto to jump to the specified label. However, if the specified label is the top-level done label indicating // that the entire match has failed, we instead emit our epilogue, uncapturing if necessary and returning out of TryMatchAtCurrentPosition. void Goto(string label) { if (label == topLevelDoneLabel) { // We only get here in the code if the whole expression fails to match and jumps to // the original value of doneLabel. if (expressionHasCaptures) { EmitUncaptureUntil("0"); } writer.WriteLine("return false; // The input didn't match."); } else { writer.WriteLine($"goto {label};"); } } // Emits a case or default line followed by an indented body. void CaseGoto(string clause, string label) { writer.WriteLine(clause); writer.Indent++; Goto(label); writer.Indent--; } // Whether the node has RegexOptions.IgnoreCase set. static bool IsCaseInsensitive(RegexNode node) => (node.Options & RegexOptions.IgnoreCase) != 0; // Slices the inputSpan starting at pos until end and stores it into slice. 
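            // (Concretely, the emitted statement is just "slice = inputSpan.Slice(pos);", prefixed with the
            // ReadOnlySpan<char> declaration when defineLocal is true.)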
void SliceInputSpan(IndentedTextWriter writer, bool defineLocal = false) { if (defineLocal) { writer.Write("global::System.ReadOnlySpan<char> "); } writer.WriteLine($"{sliceSpan} = inputSpan.Slice(pos);"); } // Emits the sum of a constant and a value from a local. string Sum(int constant, string? local = null) => local is null ? constant.ToString(CultureInfo.InvariantCulture) : constant == 0 ? local : $"{constant} + {local}"; // Emits a check that the span is large enough at the currently known static position to handle the required additional length. void EmitSpanLengthCheck(int requiredLength, string? dynamicRequiredLength = null) { Debug.Assert(requiredLength > 0); using (EmitBlock(writer, $"if ({SpanLengthCheck(requiredLength, dynamicRequiredLength)})")) { Goto(doneLabel); } } // Returns a length check for the current span slice. The check returns true if // the span isn't long enough for the specified length. string SpanLengthCheck(int requiredLength, string? dynamicRequiredLength = null) => dynamicRequiredLength is null && sliceStaticPos + requiredLength == 1 ? $"{sliceSpan}.IsEmpty" : $"(uint){sliceSpan}.Length < {Sum(sliceStaticPos + requiredLength, dynamicRequiredLength)}"; // Adds the value of sliceStaticPos into the pos local, slices slice by the corresponding amount, // and zeros out sliceStaticPos. void TransferSliceStaticPosToPos() { if (sliceStaticPos > 0) { EmitAdd(writer, "pos", sliceStaticPos); writer.WriteLine($"{sliceSpan} = {sliceSpan}.Slice({sliceStaticPos});"); sliceStaticPos = 0; } } // Emits the code for an alternation. void EmitAlternation(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Alternate, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() >= 2, $"Expected at least 2 children, found {node.ChildCount()}"); int childCount = node.ChildCount(); Debug.Assert(childCount >= 2); string originalDoneLabel = doneLabel; // Both atomic and non-atomic are supported. While a parent RegexNode.Atomic node will itself // successfully prevent backtracking into this child node, we can emit better / cheaper code // for an Alternate when it is atomic, so we still take it into account here. Debug.Assert(node.Parent is not null); bool isAtomic = analysis.IsAtomicByAncestor(node); // If no child branch overlaps with another child branch, we can emit more streamlined code // that avoids checking unnecessary branches, e.g. with abc|def|ghi if the next character in // the input is 'a', we needn't try the def or ghi branches. A simple, relatively common case // of this is if every branch begins with a specific, unique character, in which case // the whole alternation can be treated as a simple switch, so we special-case that. However, // we can't goto _into_ switch cases, which means we can't use this approach if there's any // possibility of backtracking into the alternation. bool useSwitchedBranches = isAtomic; if (!useSwitchedBranches) { useSwitchedBranches = true; for (int i = 0; i < childCount; i++) { if (analysis.MayBacktrack(node.Child(i))) { useSwitchedBranches = false; break; } } } // Detect whether every branch begins with one or more unique characters. const int SetCharsSize = 5; // arbitrary limit (for IgnoreCase, we want this to be at least 3 to handle the vast majority of values) Span<char> setChars = stackalloc char[SetCharsSize]; if (useSwitchedBranches) { // Iterate through every branch, seeing if we can easily find a starting One, Multi, or small Set. 
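                    // (For the abc|def|ghi example mentioned above, the starting characters 'a', 'd', and 'g' are all
                    // distinct, so the whole alternation can be lowered to a single switch on the first character.)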
// If we can, extract its starting char (or multiple in the case of a set), validate that all such // starting characters are unique relative to all the branches. var seenChars = new HashSet<char>(); for (int i = 0; i < childCount && useSwitchedBranches; i++) { // If it's not a One, Multi, or Set, we can't apply this optimization. // If it's IgnoreCase (and wasn't reduced to a non-IgnoreCase set), also ignore it to keep the logic simple. if (node.Child(i).FindBranchOneMultiOrSetStart() is not RegexNode oneMultiOrSet || (oneMultiOrSet.Options & RegexOptions.IgnoreCase) != 0) // TODO: https://github.com/dotnet/runtime/issues/61048 { useSwitchedBranches = false; break; } // If it's a One or a Multi, get the first character and add it to the set. // If it was already in the set, we can't apply this optimization. if (oneMultiOrSet.Kind is RegexNodeKind.One or RegexNodeKind.Multi) { if (!seenChars.Add(oneMultiOrSet.FirstCharOfOneOrMulti())) { useSwitchedBranches = false; break; } } else { // The branch begins with a set. Make sure it's a set of only a few characters // and get them. If we can't, we can't apply this optimization. Debug.Assert(oneMultiOrSet.Kind is RegexNodeKind.Set); int numChars; if (RegexCharClass.IsNegated(oneMultiOrSet.Str!) || (numChars = RegexCharClass.GetSetChars(oneMultiOrSet.Str!, setChars)) == 0) { useSwitchedBranches = false; break; } // Check to make sure each of the chars is unique relative to all other branches examined. foreach (char c in setChars.Slice(0, numChars)) { if (!seenChars.Add(c)) { useSwitchedBranches = false; break; } } } } } if (useSwitchedBranches) { // Note: This optimization does not exist with RegexOptions.Compiled. Here we rely on the // C# compiler to lower the C# switch statement with appropriate optimizations. In some // cases there are enough branches that the compiler will emit a jump table. In others // it'll optimize the order of checks in order to minimize the total number in the worst // case. In any case, we get easier to read and reason about C#. EmitSwitchedBranches(); } else { EmitAllBranches(); } return; // Emits the code for a switch-based alternation of non-overlapping branches. void EmitSwitchedBranches() { // We need at least 1 remaining character in the span, for the char to switch on. EmitSpanLengthCheck(1); writer.WriteLine(); // Emit a switch statement on the first char of each branch. using (EmitBlock(writer, $"switch ({sliceSpan}[{sliceStaticPos++}])")) { Span<char> setChars = stackalloc char[SetCharsSize]; // needs to be same size as detection check in caller int startingSliceStaticPos = sliceStaticPos; // Emit a case for each branch. for (int i = 0; i < childCount; i++) { sliceStaticPos = startingSliceStaticPos; RegexNode child = node.Child(i); Debug.Assert(child.Kind is RegexNodeKind.One or RegexNodeKind.Multi or RegexNodeKind.Set or RegexNodeKind.Concatenate, DescribeNode(child, analysis)); Debug.Assert(child.Kind is not RegexNodeKind.Concatenate || (child.ChildCount() >= 2 && child.Child(0).Kind is RegexNodeKind.One or RegexNodeKind.Multi or RegexNodeKind.Set)); RegexNode? 
childStart = child.FindBranchOneMultiOrSetStart(); Debug.Assert(childStart is not null, "Unexpectedly couldn't find the branch starting node."); Debug.Assert((childStart.Options & RegexOptions.IgnoreCase) == 0, "Expected only to find non-IgnoreCase branch starts"); if (childStart.Kind is RegexNodeKind.Set) { int numChars = RegexCharClass.GetSetChars(childStart.Str!, setChars); Debug.Assert(numChars != 0); writer.WriteLine($"case {string.Join(" or ", setChars.Slice(0, numChars).ToArray().Select(c => Literal(c)))}:"); } else { writer.WriteLine($"case {Literal(childStart.FirstCharOfOneOrMulti())}:"); } writer.Indent++; // Emit the code for the branch, without the first character that was already matched in the switch. switch (child.Kind) { case RegexNodeKind.Multi: EmitNode(CloneMultiWithoutFirstChar(child)); writer.WriteLine(); break; case RegexNodeKind.Concatenate: var newConcat = new RegexNode(RegexNodeKind.Concatenate, child.Options); if (childStart.Kind == RegexNodeKind.Multi) { newConcat.AddChild(CloneMultiWithoutFirstChar(childStart)); } int concatChildCount = child.ChildCount(); for (int j = 1; j < concatChildCount; j++) { newConcat.AddChild(child.Child(j)); } EmitNode(newConcat.Reduce()); writer.WriteLine(); break; static RegexNode CloneMultiWithoutFirstChar(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Multi); Debug.Assert(node.Str!.Length >= 2); return node.Str!.Length == 2 ? new RegexNode(RegexNodeKind.One, node.Options, node.Str![1]) : new RegexNode(RegexNodeKind.Multi, node.Options, node.Str!.Substring(1)); } } // This is only ever used for atomic alternations, so we can simply reset the doneLabel // after emitting the child, as nothing will backtrack here (and we need to reset it // so that all branches see the original). doneLabel = originalDoneLabel; // If we get here in the generated code, the branch completed successfully. // Before jumping to the end, we need to zero out sliceStaticPos, so that no // matter what the value is after the branch, whatever follows the alternate // will see the same sliceStaticPos. TransferSliceStaticPosToPos(); writer.WriteLine($"break;"); writer.WriteLine(); writer.Indent--; } // Default branch if the character didn't match the start of any branches. CaseGoto("default:", doneLabel); } } void EmitAllBranches() { // Label to jump to when any branch completes successfully. string matchLabel = ReserveName("AlternationMatch"); // Save off pos. We'll need to reset this each time a branch fails. string startingPos = ReserveName("alternation_starting_pos"); writer.WriteLine($"int {startingPos} = pos;"); int startingSliceStaticPos = sliceStaticPos; // We need to be able to undo captures in two situations: // - If a branch of the alternation itself contains captures, then if that branch // fails to match, any captures from that branch until that failure point need to // be uncaptured prior to jumping to the next branch. // - If the expression after the alternation contains captures, then failures // to match in those expressions could trigger backtracking back into the // alternation, and thus we need uncapture any of them. // As such, if the alternation contains captures or if it's not atomic, we need // to grab the current crawl position so we can unwind back to it when necessary. // We can do all of the uncapturing as part of falling through to the next branch. // If we fail in a branch, then such uncapturing will unwind back to the position // at the start of the alternation. 
If we fail after the alternation, and the // matched branch didn't contain any backtracking, then the failure will end up // jumping to the next branch, which will unwind the captures. And if we fail after // the alternation and the matched branch did contain backtracking, that backtracking // construct is responsible for unwinding back to its starting crawl position. If // it eventually ends up failing, that failure will result in jumping to the next branch // of the alternation, which will again dutifully unwind the remaining captures until // what they were at the start of the alternation. Of course, if there are no captures // anywhere in the regex, we don't have to do any of that. string? startingCapturePos = null; if (expressionHasCaptures && (analysis.MayContainCapture(node) || !isAtomic)) { startingCapturePos = ReserveName("alternation_starting_capturepos"); writer.WriteLine($"int {startingCapturePos} = base.Crawlpos();"); } writer.WriteLine(); // After executing the alternation, subsequent matching may fail, at which point execution // will need to backtrack to the alternation. We emit a branching table at the end of the // alternation, with a label that will be left as the "doneLabel" upon exiting emitting the // alternation. The branch table is populated with an entry for each branch of the alternation, // containing either the label for the last backtracking construct in the branch if such a construct // existed (in which case the doneLabel upon emitting that node will be different from before it) // or the label for the next branch. var labelMap = new string[childCount]; string backtrackLabel = ReserveName("AlternationBacktrack"); for (int i = 0; i < childCount; i++) { // If the alternation isn't atomic, backtracking may require our jump table jumping back // into these branches, so we can't use actual scopes, as that would hide the labels. using (EmitScope(writer, $"Branch {i}", faux: !isAtomic)) { bool isLastBranch = i == childCount - 1; string? nextBranch = null; if (!isLastBranch) { // Failure to match any branch other than the last one should result // in jumping to process the next branch. nextBranch = ReserveName("AlternationBranch"); doneLabel = nextBranch; } else { // Failure to match the last branch is equivalent to failing to match // the whole alternation, which means those failures should jump to // what "doneLabel" was defined as when starting the alternation. doneLabel = originalDoneLabel; } // Emit the code for each branch. EmitNode(node.Child(i)); writer.WriteLine(); // Add this branch to the backtracking table. At this point, either the child // had backtracking constructs, in which case doneLabel points to the last one // and that's where we'll want to jump to, or it doesn't, in which case doneLabel // still points to the nextBranch, which similarly is where we'll want to jump to. if (!isAtomic) { EmitStackPush(startingCapturePos is not null ? new[] { i.ToString(), startingPos, startingCapturePos } : new[] { i.ToString(), startingPos }); } labelMap[i] = doneLabel; // If we get here in the generated code, the branch completed successfully. // Before jumping to the end, we need to zero out sliceStaticPos, so that no // matter what the value is after the branch, whatever follows the alternate // will see the same sliceStaticPos. TransferSliceStaticPosToPos(); if (!isLastBranch || !isAtomic) { // If this isn't the last branch, we're about to output a reset section, // and if this isn't atomic, there will be a backtracking section before // the end of the method. 
In both of those cases, we've successfully // matched and need to skip over that code. If, however, this is the // last branch and this is an atomic alternation, we can just fall // through to the successfully matched location. Goto(matchLabel); } // Reset state for next branch and loop around to generate it. This includes // setting pos back to what it was at the beginning of the alternation, // updating slice to be the full length it was, and if there's a capture that // needs to be reset, uncapturing it. if (!isLastBranch) { writer.WriteLine(); MarkLabel(nextBranch!, emitSemicolon: false); writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); sliceStaticPos = startingSliceStaticPos; if (startingCapturePos is not null) { EmitUncaptureUntil(startingCapturePos); } } } writer.WriteLine(); } // We should never fall through to this location in the generated code. Either // a branch succeeded in matching and jumped to the end, or a branch failed in // matching and jumped to the next branch location. We only get to this code // if backtracking occurs and the code explicitly jumps here based on our setting // "doneLabel" to the label for this section. Thus, we only need to emit it if // something can backtrack to us, which can't happen if we're inside of an atomic // node. Thus, emit the backtracking section only if we're non-atomic. if (isAtomic) { doneLabel = originalDoneLabel; } else { doneLabel = backtrackLabel; MarkLabel(backtrackLabel, emitSemicolon: false); EmitStackPop(startingCapturePos is not null ? new[] { startingCapturePos, startingPos } : new[] { startingPos}); using (EmitBlock(writer, $"switch ({StackPop()})")) { for (int i = 0; i < labelMap.Length; i++) { CaseGoto($"case {i}:", labelMap[i]); } } writer.WriteLine(); } // Successfully completed the alternate. MarkLabel(matchLabel); Debug.Assert(sliceStaticPos == 0); } } // Emits the code to handle a backreference. void EmitBackreference(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Backreference, $"Unexpected type: {node.Kind}"); int capnum = RegexParser.MapCaptureNumber(node.M, rm.Tree.CaptureNumberSparseMapping); if (sliceStaticPos > 0) { TransferSliceStaticPosToPos(); writer.WriteLine(); } // If the specified capture hasn't yet captured anything, fail to match... except when using RegexOptions.ECMAScript, // in which case per ECMA 262 section 21.2.2.9 the backreference should succeed. if ((node.Options & RegexOptions.ECMAScript) != 0) { writer.WriteLine($"// If the {DescribeCapture(node.M, analysis)} hasn't matched, the backreference matches with RegexOptions.ECMAScript rules."); using (EmitBlock(writer, $"if (base.IsMatched({capnum}))")) { EmitWhenHasCapture(); } } else { writer.WriteLine($"// If the {DescribeCapture(node.M, analysis)} hasn't matched, the backreference doesn't match."); using (EmitBlock(writer, $"if (!base.IsMatched({capnum}))")) { Goto(doneLabel); } writer.WriteLine(); EmitWhenHasCapture(); } void EmitWhenHasCapture() { writer.WriteLine("// Get the captured text. If it doesn't match at the current position, the backreference doesn't match."); additionalDeclarations.Add("int matchLength = 0;"); writer.WriteLine($"matchLength = base.MatchLength({capnum});"); if (!IsCaseInsensitive(node)) { // If we're case-sensitive, we can simply validate that the remaining length of the slice is sufficient // to possibly match, and then do a SequenceEqual against the matched text. 
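// As an illustrative sketch only (names approximate, not the verbatim output), the check emitted just below has roughly this shape: // if (slice.Length < matchLength || //     !global::System.MemoryExtensions.SequenceEqual(inputSpan.Slice(base.MatchIndex(capnum), matchLength), slice.Slice(0, matchLength))) // { //     goto done; // }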
writer.WriteLine($"if ({sliceSpan}.Length < matchLength || "); using (EmitBlock(writer, $" !global::System.MemoryExtensions.SequenceEqual(inputSpan.Slice(base.MatchIndex({capnum}), matchLength), {sliceSpan}.Slice(0, matchLength)))")) { Goto(doneLabel); } } else { // For case-insensitive, we have to walk each character individually. using (EmitBlock(writer, $"if ({sliceSpan}.Length < matchLength)")) { Goto(doneLabel); } writer.WriteLine(); additionalDeclarations.Add("int matchIndex = 0;"); writer.WriteLine($"matchIndex = base.MatchIndex({capnum});"); using (EmitBlock(writer, $"for (int i = 0; i < matchLength; i++)")) { using (EmitBlock(writer, $"if ({ToLower(hasTextInfo, options, $"inputSpan[matchIndex + i]")} != {ToLower(hasTextInfo, options, $"{sliceSpan}[i]")})")) { Goto(doneLabel); } } } writer.WriteLine(); writer.WriteLine($"pos += matchLength;"); SliceInputSpan(writer); } } // Emits the code for an if(backreference)-then-else conditional. void EmitBackreferenceConditional(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.BackreferenceConditional, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 2, $"Expected 2 children, found {node.ChildCount()}"); // We're branching in a complicated fashion. Make sure sliceStaticPos is 0. TransferSliceStaticPosToPos(); // Get the capture number to test. int capnum = RegexParser.MapCaptureNumber(node.M, rm.Tree.CaptureNumberSparseMapping); // Get the "yes" branch and the "no" branch. The "no" branch is optional in syntax and is thus // somewhat likely to be Empty. RegexNode yesBranch = node.Child(0); RegexNode? noBranch = node.Child(1) is { Kind: not RegexNodeKind.Empty } childNo ? childNo : null; string originalDoneLabel = doneLabel; // If the child branches might backtrack, we can't emit the branches inside constructs that // require braces, e.g. if/else, even though that would yield more idiomatic output. // But if we know for certain they won't backtrack, we can output the nicer code. if (analysis.IsAtomicByAncestor(node) || (!analysis.MayBacktrack(yesBranch) && (noBranch is null || !analysis.MayBacktrack(noBranch)))) { using (EmitBlock(writer, $"if (base.IsMatched({capnum}))")) { writer.WriteLine($"// The {DescribeCapture(node.M, analysis)} captured a value. Match the first branch."); EmitNode(yesBranch); writer.WriteLine(); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch } if (noBranch is not null) { using (EmitBlock(writer, $"else")) { writer.WriteLine($"// Otherwise, match the second branch."); EmitNode(noBranch); writer.WriteLine(); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch } } doneLabel = originalDoneLabel; // atomicity return; } string refNotMatched = ReserveName("ConditionalBackreferenceNotMatched"); string endConditional = ReserveName("ConditionalBackreferenceEnd"); // As with alternations, we have potentially multiple branches, each of which may contain // backtracking constructs, but the expression after the conditional needs a single target // to backtrack to. So, we expose a single Backtrack label and track which branch was // followed in this resumeAt local. string resumeAt = ReserveName("conditionalbackreference_branch"); writer.WriteLine($"int {resumeAt} = 0;"); // While it would be nicely readable to use an if/else block, if the branches contain // anything that triggers backtracking, labels will end up being defined, and if they're // inside the scope block for the if or else, that will prevent jumping to them from // elsewhere. 
So we implement the if/else with labels and gotos manually. // Check to see if the specified capture number was captured. using (EmitBlock(writer, $"if (!base.IsMatched({capnum}))")) { Goto(refNotMatched); } writer.WriteLine(); // The specified capture was captured. Run the "yes" branch. // If it successfully matches, jump to the end. EmitNode(yesBranch); writer.WriteLine(); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch string postYesDoneLabel = doneLabel; if (postYesDoneLabel != originalDoneLabel) { writer.WriteLine($"{resumeAt} = 0;"); } bool needsEndConditional = postYesDoneLabel != originalDoneLabel || noBranch is not null; if (needsEndConditional) { Goto(endConditional); writer.WriteLine(); } MarkLabel(refNotMatched); string postNoDoneLabel = originalDoneLabel; if (noBranch is not null) { // Output the no branch. doneLabel = originalDoneLabel; EmitNode(noBranch); writer.WriteLine(); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch postNoDoneLabel = doneLabel; if (postNoDoneLabel != originalDoneLabel) { writer.WriteLine($"{resumeAt} = 1;"); } } else { // There's only a yes branch. If it's going to cause us to output a backtracking // label but code may not end up taking the yes branch path, we need to emit a resumeAt // that will cause the backtracking to immediately pass through this node. if (postYesDoneLabel != originalDoneLabel) { writer.WriteLine($"{resumeAt} = 2;"); } } // If either the yes branch or the no branch contained backtracking, subsequent expressions // might try to backtrack to here, so output a backtracking map based on resumeAt. bool hasBacktracking = postYesDoneLabel != originalDoneLabel || postNoDoneLabel != originalDoneLabel; if (hasBacktracking) { // Skip the backtracking section. Goto(endConditional); writer.WriteLine(); // Backtrack section string backtrack = ReserveName("ConditionalBackreferenceBacktrack"); doneLabel = backtrack; MarkLabel(backtrack); // Pop from the stack the branch that was used and jump back to its backtracking location. EmitStackPop(resumeAt); using (EmitBlock(writer, $"switch ({resumeAt})")) { if (postYesDoneLabel != originalDoneLabel) { CaseGoto("case 0:", postYesDoneLabel); } if (postNoDoneLabel != originalDoneLabel) { CaseGoto("case 1:", postNoDoneLabel); } CaseGoto("default:", originalDoneLabel); } } if (needsEndConditional) { MarkLabel(endConditional); } if (hasBacktracking) { // We're not atomic and at least one of the yes or no branches contained backtracking constructs, // so finish outputting our backtracking logic, which involves pushing onto the stack which // branch to backtrack into. EmitStackPush(resumeAt); } } // Emits the code for an if(expression)-then-else conditional. void EmitExpressionConditional(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.ExpressionConditional, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 3, $"Expected 3 children, found {node.ChildCount()}"); bool isAtomic = analysis.IsAtomicByAncestor(node); // We're branching in a complicated fashion. Make sure sliceStaticPos is 0. TransferSliceStaticPosToPos(); // The first child node is the condition expression. If this matches, then we branch to the "yes" branch. // If it doesn't match, then we branch to the optional "no" branch if it exists, or simply skip the "yes" // branch, otherwise. The condition is treated as a positive lookahead. RegexNode condition = node.Child(0); // Get the "yes" branch and the "no" branch. 
The "no" branch is optional in syntax and is thus // somewhat likely to be Empty. RegexNode yesBranch = node.Child(1); RegexNode? noBranch = node.Child(2) is { Kind: not RegexNodeKind.Empty } childNo ? childNo : null; string originalDoneLabel = doneLabel; string expressionNotMatched = ReserveName("ConditionalExpressionNotMatched"); string endConditional = ReserveName("ConditionalExpressionEnd"); // As with alternations, we have potentially multiple branches, each of which may contain // backtracking constructs, but the expression after the condition needs a single target // to backtrack to. So, we expose a single Backtrack label and track which branch was // followed in this resumeAt local. string resumeAt = ReserveName("conditionalexpression_branch"); if (!isAtomic) { writer.WriteLine($"int {resumeAt} = 0;"); } // If the condition expression has captures, we'll need to uncapture them in the case of no match. string? startingCapturePos = null; if (analysis.MayContainCapture(condition)) { startingCapturePos = ReserveName("conditionalexpression_starting_capturepos"); writer.WriteLine($"int {startingCapturePos} = base.Crawlpos();"); } // Emit the condition expression. Route any failures to after the yes branch. This code is almost // the same as for a positive lookahead; however, a positive lookahead only needs to reset the position // on a successful match, as a failed match fails the whole expression; here, we need to reset the // position on completion, regardless of whether the match is successful or not. doneLabel = expressionNotMatched; // Save off pos. We'll need to reset this upon successful completion of the lookahead. string startingPos = ReserveName("conditionalexpression_starting_pos"); writer.WriteLine($"int {startingPos} = pos;"); writer.WriteLine(); int startingSliceStaticPos = sliceStaticPos; // Emit the child. The condition expression is a zero-width assertion, which is atomic, // so prevent backtracking into it. writer.WriteLine("// Condition:"); EmitNode(condition); writer.WriteLine(); doneLabel = originalDoneLabel; // After the condition completes successfully, reset the text positions. // Do not reset captures, which persist beyond the lookahead. writer.WriteLine("// Condition matched:"); writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); sliceStaticPos = startingSliceStaticPos; writer.WriteLine(); // The expression matched. Run the "yes" branch. If it successfully matches, jump to the end. EmitNode(yesBranch); writer.WriteLine(); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch string postYesDoneLabel = doneLabel; if (!isAtomic && postYesDoneLabel != originalDoneLabel) { writer.WriteLine($"{resumeAt} = 0;"); } Goto(endConditional); writer.WriteLine(); // After the condition completes unsuccessfully, reset the text positions // _and_ reset captures, which should not persist when the whole expression failed. writer.WriteLine("// Condition did not match:"); MarkLabel(expressionNotMatched, emitSemicolon: false); writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); sliceStaticPos = startingSliceStaticPos; if (startingCapturePos is not null) { EmitUncaptureUntil(startingCapturePos); } writer.WriteLine(); string postNoDoneLabel = originalDoneLabel; if (noBranch is not null) { // Output the no branch. 
doneLabel = originalDoneLabel; EmitNode(noBranch); writer.WriteLine(); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch postNoDoneLabel = doneLabel; if (!isAtomic && postNoDoneLabel != originalDoneLabel) { writer.WriteLine($"{resumeAt} = 1;"); } } else { // There's only a yes branch. If it's going to cause us to output a backtracking // label but code may not end up taking the yes branch path, we need to emit a resumeAt // that will cause the backtracking to immediately pass through this node. if (!isAtomic && postYesDoneLabel != originalDoneLabel) { writer.WriteLine($"{resumeAt} = 2;"); } } // If either the yes branch or the no branch contained backtracking, subsequent expressions // might try to backtrack to here, so output a backtracking map based on resumeAt. if (isAtomic || (postYesDoneLabel == originalDoneLabel && postNoDoneLabel == originalDoneLabel)) { doneLabel = originalDoneLabel; MarkLabel(endConditional); } else { // Skip the backtracking section. Goto(endConditional); writer.WriteLine(); string backtrack = ReserveName("ConditionalExpressionBacktrack"); doneLabel = backtrack; MarkLabel(backtrack, emitSemicolon: false); EmitStackPop(resumeAt); using (EmitBlock(writer, $"switch ({resumeAt})")) { if (postYesDoneLabel != originalDoneLabel) { CaseGoto("case 0:", postYesDoneLabel); } if (postNoDoneLabel != originalDoneLabel) { CaseGoto("case 1:", postNoDoneLabel); } CaseGoto("default:", originalDoneLabel); } MarkLabel(endConditional, emitSemicolon: false); EmitStackPush(resumeAt); } } // Emits the code for a Capture node. void EmitCapture(RegexNode node, RegexNode? subsequent = null) { Debug.Assert(node.Kind is RegexNodeKind.Capture, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); int capnum = RegexParser.MapCaptureNumber(node.M, rm.Tree.CaptureNumberSparseMapping); int uncapnum = RegexParser.MapCaptureNumber(node.N, rm.Tree.CaptureNumberSparseMapping); bool isAtomic = analysis.IsAtomicByAncestor(node); TransferSliceStaticPosToPos(); string startingPos = ReserveName("capture_starting_pos"); writer.WriteLine($"int {startingPos} = pos;"); writer.WriteLine(); RegexNode child = node.Child(0); if (uncapnum != -1) { using (EmitBlock(writer, $"if (!base.IsMatched({uncapnum}))")) { Goto(doneLabel); } writer.WriteLine(); } // Emit child node. string originalDoneLabel = doneLabel; EmitNode(child, subsequent); bool childBacktracks = doneLabel != originalDoneLabel; writer.WriteLine(); TransferSliceStaticPosToPos(); if (uncapnum == -1) { writer.WriteLine($"base.Capture({capnum}, {startingPos}, pos);"); } else { writer.WriteLine($"base.TransferCapture({capnum}, {uncapnum}, {startingPos}, pos);"); } if (isAtomic || !childBacktracks) { // If the capture is atomic and nothing can backtrack into it, we're done. // Similarly, even if the capture isn't atomic, if the captured expression // doesn't do any backtracking, we're done. doneLabel = originalDoneLabel; } else { // We're not atomic and the child node backtracks. When it does, we need // to ensure that the starting position for the capture is appropriately // reset to what it was initially (it could have changed as part of being // in a loop or similar). So, we emit a backtracking section that // pushes/pops the starting position before falling through. 
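// Rough sketch of what this produces (labels and locals are approximate, shown only for illustration): //   push capture_starting_pos onto the backtracking stack; //   goto SkipBacktrack; // CaptureBacktrack: //   pop capture_starting_pos from the backtracking stack; //   goto <the doneLabel that preceded this node>; // SkipBacktrack:;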
writer.WriteLine(); EmitStackPush(startingPos); // Skip past the backtracking section string end = ReserveName("SkipBacktrack"); Goto(end); writer.WriteLine(); // Emit a backtracking section that restores the capture's state and then jumps to the previous done label string backtrack = ReserveName($"CaptureBacktrack"); MarkLabel(backtrack, emitSemicolon: false); EmitStackPop(startingPos); if (!childBacktracks) { writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); } Goto(doneLabel); writer.WriteLine(); doneLabel = backtrack; MarkLabel(end); } } // Emits the code to handle a positive lookahead assertion. void EmitPositiveLookaheadAssertion(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.PositiveLookaround, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); // Save off pos. We'll need to reset this upon successful completion of the lookahead. string startingPos = ReserveName("positivelookahead_starting_pos"); writer.WriteLine($"int {startingPos} = pos;"); writer.WriteLine(); int startingSliceStaticPos = sliceStaticPos; // Emit the child. RegexNode child = node.Child(0); if (analysis.MayBacktrack(child)) { // Lookarounds are implicitly atomic, so we need to emit the node as atomic if it might backtrack. EmitAtomic(node, null); } else { EmitNode(child); } // After the child completes successfully, reset the text positions. // Do not reset captures, which persist beyond the lookahead. writer.WriteLine(); writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); sliceStaticPos = startingSliceStaticPos; } // Emits the code to handle a negative lookahead assertion. void EmitNegativeLookaheadAssertion(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.NegativeLookaround, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); string originalDoneLabel = doneLabel; // Save off pos. We'll need to reset this upon successful completion of the lookahead. string startingPos = ReserveName("negativelookahead_starting_pos"); writer.WriteLine($"int {startingPos} = pos;"); int startingSliceStaticPos = sliceStaticPos; string negativeLookaheadDoneLabel = ReserveName("NegativeLookaheadMatch"); doneLabel = negativeLookaheadDoneLabel; // Emit the child. RegexNode child = node.Child(0); if (analysis.MayBacktrack(child)) { // Lookarounds are implicitly atomic, so we need to emit the node as atomic if it might backtrack. EmitAtomic(node, null); } else { EmitNode(child); } // If the generated code ends up here, it matched the lookahead, which actually // means failure for a _negative_ lookahead, so we need to jump to the original done. writer.WriteLine(); Goto(originalDoneLabel); writer.WriteLine(); // Failures (success for a negative lookahead) jump here. MarkLabel(negativeLookaheadDoneLabel, emitSemicolon: false); // After the child completes in failure (success for negative lookahead), reset the text positions. writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); sliceStaticPos = startingSliceStaticPos; doneLabel = originalDoneLabel; } // Emits the code for the node. void EmitNode(RegexNode node, RegexNode? subsequent = null, bool emitLengthChecksIfRequired = true) { if (!StackHelper.TryEnsureSufficientExecutionStack()) { StackHelper.CallOnEmptyStack(EmitNode, node, subsequent, emitLengthChecksIfRequired); return; } // Separate out several node types that, for conciseness, don't need a header and scope written into the source. 
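// For example, an Empty node emits nothing at all, and a Concatenate simply emits each of its children in order.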
switch (node.Kind) { // Nothing is written for an empty case RegexNodeKind.Empty: return; // A match failure doesn't need a scope. case RegexNodeKind.Nothing: Goto(doneLabel); return; // Skip atomic nodes that wrap non-backtracking children; in such a case there's nothing to be made atomic. case RegexNodeKind.Atomic when !analysis.MayBacktrack(node.Child(0)): EmitNode(node.Child(0)); return; // Concatenate is a simplification in the node tree so that a series of children can be represented as one. // We don't need its presence visible in the source. case RegexNodeKind.Concatenate: EmitConcatenation(node, subsequent, emitLengthChecksIfRequired); return; } // Put the node's code into its own scope. If the node contains labels that may need to // be visible outside of its scope, the scope is still emitted for clarity but is commented out. using (EmitScope(writer, DescribeNode(node, analysis), faux: analysis.MayBacktrack(node))) { switch (node.Kind) { case RegexNodeKind.Beginning: case RegexNodeKind.Start: case RegexNodeKind.Bol: case RegexNodeKind.Eol: case RegexNodeKind.End: case RegexNodeKind.EndZ: EmitAnchors(node); break; case RegexNodeKind.Boundary: case RegexNodeKind.NonBoundary: case RegexNodeKind.ECMABoundary: case RegexNodeKind.NonECMABoundary: EmitBoundary(node); break; case RegexNodeKind.Multi: EmitMultiChar(node, emitLengthChecksIfRequired); break; case RegexNodeKind.One: case RegexNodeKind.Notone: case RegexNodeKind.Set: EmitSingleChar(node, emitLengthChecksIfRequired); break; case RegexNodeKind.Oneloop: case RegexNodeKind.Notoneloop: case RegexNodeKind.Setloop: EmitSingleCharLoop(node, subsequent, emitLengthChecksIfRequired); break; case RegexNodeKind.Onelazy: case RegexNodeKind.Notonelazy: case RegexNodeKind.Setlazy: EmitSingleCharLazy(node, subsequent, emitLengthChecksIfRequired); break; case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Setloopatomic: EmitSingleCharAtomicLoop(node, emitLengthChecksIfRequired); break; case RegexNodeKind.Loop: EmitLoop(node); break; case RegexNodeKind.Lazyloop: EmitLazy(node); break; case RegexNodeKind.Alternate: EmitAlternation(node); break; case RegexNodeKind.Backreference: EmitBackreference(node); break; case RegexNodeKind.BackreferenceConditional: EmitBackreferenceConditional(node); break; case RegexNodeKind.ExpressionConditional: EmitExpressionConditional(node); break; case RegexNodeKind.Atomic when analysis.MayBacktrack(node.Child(0)): EmitAtomic(node, subsequent); return; case RegexNodeKind.Capture: EmitCapture(node, subsequent); break; case RegexNodeKind.PositiveLookaround: EmitPositiveLookaheadAssertion(node); break; case RegexNodeKind.NegativeLookaround: EmitNegativeLookaheadAssertion(node); break; case RegexNodeKind.UpdateBumpalong: EmitUpdateBumpalong(node); break; default: Debug.Fail($"Unexpected node type: {node.Kind}"); break; } } } // Emits the node for an atomic. void EmitAtomic(RegexNode node, RegexNode? subsequent) { Debug.Assert(node.Kind is RegexNodeKind.Atomic or RegexNodeKind.PositiveLookaround or RegexNodeKind.NegativeLookaround, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); Debug.Assert(analysis.MayBacktrack(node.Child(0)), "Expected child to potentially backtrack"); // Grab the current done label and the current backtracking position. 
The purpose of the atomic node // is to ensure that nodes after it that might backtrack skip over the atomic, which means after // rendering the atomic's child, we need to reset the label so that subsequent backtracking doesn't // see any label left set by the atomic's child. We also need to reset the backtracking stack position // so that the state on the stack remains consistent. string originalDoneLabel = doneLabel; additionalDeclarations.Add("int stackpos = 0;"); string startingStackpos = ReserveName("atomic_stackpos"); writer.WriteLine($"int {startingStackpos} = stackpos;"); writer.WriteLine(); // Emit the child. EmitNode(node.Child(0), subsequent); writer.WriteLine(); // Reset the stack position and done label. writer.WriteLine($"stackpos = {startingStackpos};"); doneLabel = originalDoneLabel; } // Emits the code to handle updating base.runtextpos to pos in response to // an UpdateBumpalong node. This is used when we want to inform the scan loop that // it should bump from this location rather than from the original location. void EmitUpdateBumpalong(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.UpdateBumpalong, $"Unexpected type: {node.Kind}"); TransferSliceStaticPosToPos(); using (EmitBlock(writer, "if (base.runtextpos < pos)")) { writer.WriteLine("base.runtextpos = pos;"); } } // Emits code for a concatenation void EmitConcatenation(RegexNode node, RegexNode? subsequent, bool emitLengthChecksIfRequired) { Debug.Assert(node.Kind is RegexNodeKind.Concatenate, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() >= 2, $"Expected at least 2 children, found {node.ChildCount()}"); // Emit the code for each child one after the other. string? prevDescription = null; int childCount = node.ChildCount(); for (int i = 0; i < childCount; i++) { // If we can find a subsequence of fixed-length children, we can emit a length check once for that sequence // and then skip the individual length checks for each. We also want to minimize the repetition of if blocks, // and so we try to emit a series of clauses all part of the same if block rather than one if block per child. if (emitLengthChecksIfRequired && node.TryGetJoinableLengthCheckChildRange(i, out int requiredLength, out int exclusiveEnd)) { bool wroteClauses = true; writer.Write($"if ({SpanLengthCheck(requiredLength)}"); while (i < exclusiveEnd) { for (; i < exclusiveEnd; i++) { void WriteSingleCharChild(RegexNode child, bool includeDescription = true) { if (wroteClauses) { writer.WriteLine(prevDescription is not null ? $" || // {prevDescription}" : " ||"); writer.Write(" "); } else { writer.Write("if ("); } EmitSingleChar(child, emitLengthCheck: false, clauseOnly: true); prevDescription = includeDescription ? DescribeNode(child, analysis) : null; wroteClauses = true; } RegexNode child = node.Child(i); if (child.Kind is RegexNodeKind.One or RegexNodeKind.Notone or RegexNodeKind.Set) { WriteSingleCharChild(child); } else if (child.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Onelazy or RegexNodeKind.Oneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setlazy or RegexNodeKind.Setloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notonelazy or RegexNodeKind.Notoneloopatomic && child.M == child.N && child.M <= MaxUnrollSize) { for (int c = 0; c < child.M; c++) { WriteSingleCharChild(child, includeDescription: c == 0); } } else { break; } } if (wroteClauses) { writer.WriteLine(prevDescription is not null ? 
$") // {prevDescription}" : ")"); using (EmitBlock(writer, null)) { Goto(doneLabel); } if (i < childCount) { writer.WriteLine(); } wroteClauses = false; prevDescription = null; } if (i < exclusiveEnd) { EmitNode(node.Child(i), GetSubsequentOrDefault(i, node, subsequent), emitLengthChecksIfRequired: false); if (i < childCount - 1) { writer.WriteLine(); } i++; } } i--; continue; } EmitNode(node.Child(i), GetSubsequentOrDefault(i, node, subsequent), emitLengthChecksIfRequired: emitLengthChecksIfRequired); if (i < childCount - 1) { writer.WriteLine(); } } // Gets the node to treat as the subsequent one to node.Child(index) static RegexNode? GetSubsequentOrDefault(int index, RegexNode node, RegexNode? defaultNode) { int childCount = node.ChildCount(); for (int i = index + 1; i < childCount; i++) { RegexNode next = node.Child(i); if (next.Kind is not RegexNodeKind.UpdateBumpalong) // skip node types that don't have a semantic impact { return next; } } return defaultNode; } } // Emits the code to handle a single-character match. void EmitSingleChar(RegexNode node, bool emitLengthCheck = true, string? offset = null, bool clauseOnly = false) { Debug.Assert(node.IsOneFamily || node.IsNotoneFamily || node.IsSetFamily, $"Unexpected type: {node.Kind}"); // This only emits a single check, but it's called from the looping constructs in a loop // to generate the code for a single check, so we map those looping constructs to the // appropriate single check. string expr = $"{sliceSpan}[{Sum(sliceStaticPos, offset)}]"; if (node.IsSetFamily) { expr = $"{MatchCharacterClass(hasTextInfo, options, expr, node.Str!, IsCaseInsensitive(node), negate: true, additionalDeclarations, ref requiredHelpers)}"; } else { expr = ToLowerIfNeeded(hasTextInfo, options, expr, IsCaseInsensitive(node)); expr = $"{expr} {(node.IsOneFamily ? "!=" : "==")} {Literal(node.Ch)}"; } if (clauseOnly) { writer.Write(expr); } else { using (EmitBlock(writer, emitLengthCheck ? $"if ({SpanLengthCheck(1, offset)} || {expr})" : $"if ({expr})")) { Goto(doneLabel); } } sliceStaticPos++; } // Emits the code to handle a boundary check on a character. void EmitBoundary(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Boundary or RegexNodeKind.NonBoundary or RegexNodeKind.ECMABoundary or RegexNodeKind.NonECMABoundary, $"Unexpected type: {node.Kind}"); string call = node.Kind switch { RegexNodeKind.Boundary => "!IsBoundary", RegexNodeKind.NonBoundary => "IsBoundary", RegexNodeKind.ECMABoundary => "!IsECMABoundary", _ => "IsECMABoundary", }; RequiredHelperFunctions boundaryFunctionRequired = node.Kind switch { RegexNodeKind.Boundary or RegexNodeKind.NonBoundary => RequiredHelperFunctions.IsBoundary | RequiredHelperFunctions.IsWordChar, // IsBoundary internally uses IsWordChar _ => RequiredHelperFunctions.IsECMABoundary }; requiredHelpers |= boundaryFunctionRequired; using (EmitBlock(writer, $"if ({call}(inputSpan, pos{(sliceStaticPos > 0 ? $" + {sliceStaticPos}" : "")}))")) { Goto(doneLabel); } } // Emits the code to handle various anchors. 
void EmitAnchors(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Beginning or RegexNodeKind.Start or RegexNodeKind.Bol or RegexNodeKind.End or RegexNodeKind.EndZ or RegexNodeKind.Eol, $"Unexpected type: {node.Kind}"); Debug.Assert(sliceStaticPos >= 0); switch (node.Kind) { case RegexNodeKind.Beginning: case RegexNodeKind.Start: if (sliceStaticPos > 0) { // If we statically know we've already matched part of the regex, there's no way we're at the // beginning or start, as we've already progressed past it. Goto(doneLabel); } else { using (EmitBlock(writer, node.Kind == RegexNodeKind.Beginning ? "if (pos != 0)" : "if (pos != base.runtextstart)")) { Goto(doneLabel); } } break; case RegexNodeKind.Bol: if (sliceStaticPos > 0) { using (EmitBlock(writer, $"if ({sliceSpan}[{sliceStaticPos - 1}] != '\\n')")) { Goto(doneLabel); } } else { // We can't use our slice in this case, because we'd need to access slice[-1], so we access the inputSpan field directly: using (EmitBlock(writer, $"if (pos > 0 && inputSpan[pos - 1] != '\\n')")) { Goto(doneLabel); } } break; case RegexNodeKind.End: using (EmitBlock(writer, $"if ({IsSliceLengthGreaterThanSliceStaticPos()})")) { Goto(doneLabel); } break; case RegexNodeKind.EndZ: writer.WriteLine($"if ({sliceSpan}.Length > {sliceStaticPos + 1} || ({IsSliceLengthGreaterThanSliceStaticPos()} && {sliceSpan}[{sliceStaticPos}] != '\\n'))"); using (EmitBlock(writer, null)) { Goto(doneLabel); } break; case RegexNodeKind.Eol: using (EmitBlock(writer, $"if ({IsSliceLengthGreaterThanSliceStaticPos()} && {sliceSpan}[{sliceStaticPos}] != '\\n')")) { Goto(doneLabel); } break; string IsSliceLengthGreaterThanSliceStaticPos() => sliceStaticPos == 0 ? $"!{sliceSpan}.IsEmpty" : $"{sliceSpan}.Length > {sliceStaticPos}"; } } // Emits the code to handle a multiple-character match. void EmitMultiChar(RegexNode node, bool emitLengthCheck) { Debug.Assert(node.Kind is RegexNodeKind.Multi, $"Unexpected type: {node.Kind}"); Debug.Assert(node.Str is not null); EmitMultiCharString(node.Str, IsCaseInsensitive(node), emitLengthCheck); } void EmitMultiCharString(string str, bool caseInsensitive, bool emitLengthCheck) { Debug.Assert(str.Length >= 2); if (caseInsensitive) // StartsWith(..., XxIgnoreCase) won't necessarily be the same as char-by-char comparison { // This case should be relatively rare. It will only occur with IgnoreCase and a series of non-ASCII characters. if (emitLengthCheck) { EmitSpanLengthCheck(str.Length); } using (EmitBlock(writer, $"for (int i = 0; i < {Literal(str)}.Length; i++)")) { string textSpanIndex = sliceStaticPos > 0 ? $"i + {sliceStaticPos}" : "i"; using (EmitBlock(writer, $"if ({ToLower(hasTextInfo, options, $"{sliceSpan}[{textSpanIndex}]")} != {Literal(str)}[i])")) { Goto(doneLabel); } } } else { string sourceSpan = sliceStaticPos > 0 ? $"{sliceSpan}.Slice({sliceStaticPos})" : sliceSpan; using (EmitBlock(writer, $"if (!global::System.MemoryExtensions.StartsWith({sourceSpan}, {Literal(str)}))")) { Goto(doneLabel); } } sliceStaticPos += str.Length; } void EmitSingleCharLoop(RegexNode node, RegexNode? subsequent = null, bool emitLengthChecksIfRequired = true) { Debug.Assert(node.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop, $"Unexpected type: {node.Kind}"); // If this is actually atomic based on its parent, emit it as atomic instead; no backtracking necessary. if (analysis.IsAtomicByAncestor(node)) { EmitSingleCharAtomicLoop(node); return; } // If this is actually a repeater, emit that instead; no backtracking necessary. 
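// e.g. a{3} has M == N == 3 and can be emitted as a fixed repeater with no backtracking state at all.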
if (node.M == node.N) { EmitSingleCharRepeater(node, emitLengthChecksIfRequired); return; } // Emit backtracking around an atomic single char loop. We can then implement the backtracking // as an afterthought, since we know exactly how many characters are accepted by each iteration // of the wrapped loop (1) and that there's nothing captured by the loop. Debug.Assert(node.M < node.N); string backtrackingLabel = ReserveName("CharLoopBacktrack"); string endLoop = ReserveName("CharLoopEnd"); string startingPos = ReserveName("charloop_starting_pos"); string endingPos = ReserveName("charloop_ending_pos"); additionalDeclarations.Add($"int {startingPos} = 0, {endingPos} = 0;"); // We're about to enter a loop, so ensure our text position is 0. TransferSliceStaticPosToPos(); // Grab the current position, then emit the loop as atomic, and then // grab the current position again. Even though we emit the loop without // knowledge of backtracking, we can layer it on top by just walking back // through the individual characters (a benefit of the loop matching exactly // one character per iteration, no possible captures within the loop, etc.) writer.WriteLine($"{startingPos} = pos;"); writer.WriteLine(); EmitSingleCharAtomicLoop(node); writer.WriteLine(); TransferSliceStaticPosToPos(); writer.WriteLine($"{endingPos} = pos;"); EmitAdd(writer, startingPos, node.M); Goto(endLoop); writer.WriteLine(); // Backtracking section. Subsequent failures will jump to here, at which // point we decrement the matched count as long as it's above the minimum // required, and try again by flowing to everything that comes after this. MarkLabel(backtrackingLabel, emitSemicolon: false); if (expressionHasCaptures) { EmitUncaptureUntil(StackPop()); } EmitStackPop(endingPos, startingPos); writer.WriteLine(); if (subsequent?.FindStartingLiteral() is ValueTuple<char, string?, string?> literal) { writer.WriteLine($"if ({startingPos} >= {endingPos} ||"); using (EmitBlock(writer, literal.Item2 is not null ? $" ({endingPos} = global::System.MemoryExtensions.LastIndexOf(inputSpan.Slice({startingPos}, global::System.Math.Min(inputSpan.Length, {endingPos} + {literal.Item2.Length - 1}) - {startingPos}), {Literal(literal.Item2)})) < 0)" : literal.Item3 is null ? $" ({endingPos} = global::System.MemoryExtensions.LastIndexOf(inputSpan.Slice({startingPos}, {endingPos} - {startingPos}), {Literal(literal.Item1)})) < 0)" : literal.Item3.Length switch { 2 => $" ({endingPos} = global::System.MemoryExtensions.LastIndexOfAny(inputSpan.Slice({startingPos}, {endingPos} - {startingPos}), {Literal(literal.Item3[0])}, {Literal(literal.Item3[1])})) < 0)", 3 => $" ({endingPos} = global::System.MemoryExtensions.LastIndexOfAny(inputSpan.Slice({startingPos}, {endingPos} - {startingPos}), {Literal(literal.Item3[0])}, {Literal(literal.Item3[1])}, {Literal(literal.Item3[2])})) < 0)", _ => $" ({endingPos} = global::System.MemoryExtensions.LastIndexOfAny(inputSpan.Slice({startingPos}, {endingPos} - {startingPos}), {Literal(literal.Item3)})) < 0)", })) { Goto(doneLabel); } writer.WriteLine($"{endingPos} += {startingPos};"); writer.WriteLine($"pos = {endingPos};"); } else { using (EmitBlock(writer, $"if ({startingPos} >= {endingPos})")) { Goto(doneLabel); } writer.WriteLine($"pos = --{endingPos};"); } SliceInputSpan(writer); writer.WriteLine(); MarkLabel(endLoop, emitSemicolon: false); EmitStackPush(expressionHasCaptures ? 
new[] { startingPos, endingPos, "base.Crawlpos()" } : new[] { startingPos, endingPos }); doneLabel = backtrackingLabel; // leave set to the backtracking label for all subsequent nodes } void EmitSingleCharLazy(RegexNode node, RegexNode? subsequent = null, bool emitLengthChecksIfRequired = true) { Debug.Assert(node.Kind is RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy, $"Unexpected type: {node.Kind}"); // Emit the min iterations as a repeater. Any failures here don't necessitate backtracking, // as the lazy itself failed to match, and there's no backtracking possible by the individual // characters/iterations themselves. if (node.M > 0) { EmitSingleCharRepeater(node, emitLengthChecksIfRequired); } // If the whole thing was actually that repeater, we're done. Similarly, if this is actually an atomic // lazy loop, nothing will ever backtrack into this node, so we never need to iterate more than the minimum. if (node.M == node.N || analysis.IsAtomicByAncestor(node)) { return; } if (node.M > 0) { // We emitted a repeater to handle the required iterations; add a newline after it. writer.WriteLine(); } Debug.Assert(node.M < node.N); // We now need to match one character at a time, each time allowing the remainder of the expression // to try to match, and only matching another character if the subsequent expression fails to match. // We're about to enter a loop, so ensure our text position is 0. TransferSliceStaticPosToPos(); // If the loop isn't unbounded, track the number of iterations and the max number to allow. string? iterationCount = null; string? maxIterations = null; if (node.N != int.MaxValue) { maxIterations = $"{node.N - node.M}"; iterationCount = ReserveName("lazyloop_iteration"); writer.WriteLine($"int {iterationCount} = 0;"); } // Track the current crawl position. Upon backtracking, we'll unwind any captures beyond this point. string? capturePos = null; if (expressionHasCaptures) { capturePos = ReserveName("lazyloop_capturepos"); additionalDeclarations.Add($"int {capturePos} = 0;"); } // Track the current pos. Each time we backtrack, we'll reset to the stored position, which // is also incremented each time we match another character in the loop. string startingPos = ReserveName("lazyloop_pos"); additionalDeclarations.Add($"int {startingPos} = 0;"); writer.WriteLine($"{startingPos} = pos;"); // Skip the backtracking section for the initial subsequent matching. We've already matched the // minimum number of iterations, which means we can successfully match with zero additional iterations. string endLoopLabel = ReserveName("LazyLoopEnd"); Goto(endLoopLabel); writer.WriteLine(); // Backtracking section. Subsequent failures will jump to here. string backtrackingLabel = ReserveName("LazyLoopBacktrack"); MarkLabel(backtrackingLabel, emitSemicolon: false); // Uncapture any captures if the expression has any. It's possible the captures it has // are before this node, in which case this is wasted effort, but still functionally correct. if (capturePos is not null) { EmitUncaptureUntil(capturePos); } // If there's a max number of iterations, see if we've exceeded the maximum number of characters // to match. If we haven't, increment the iteration count. if (maxIterations is not null) { using (EmitBlock(writer, $"if ({iterationCount} >= {maxIterations})")) { Goto(doneLabel); } writer.WriteLine($"{iterationCount}++;"); } // Now match the next item in the lazy loop. 
We need to reset the pos to the position // just after the last character in this loop was matched, and we need to store the resulting position // for the next time we backtrack. writer.WriteLine($"pos = {startingPos};"); SliceInputSpan(writer); EmitSingleChar(node); TransferSliceStaticPosToPos(); // Now that we've appropriately advanced by one character and are set for what comes after the loop, // see if we can skip ahead more iterations by doing a search for a following literal. if (iterationCount is null && node.Kind is RegexNodeKind.Notonelazy && !IsCaseInsensitive(node) && subsequent?.FindStartingLiteral(4) is ValueTuple<char, string?, string?> literal && // 5 == max optimized by IndexOfAny, and we need to reserve 1 for node.Ch (literal.Item3 is not null ? !literal.Item3.Contains(node.Ch) : (literal.Item2?[0] ?? literal.Item1) != node.Ch)) // no overlap between node.Ch and the start of the literal { // e.g. "<[^>]*?>" // This lazy loop will consume all characters other than node.Ch until the subsequent literal. // We can implement it to search for either that char or the literal, whichever comes first. // If it ends up being that node.Ch, the loop fails (we're only here if we're backtracking). writer.WriteLine( literal.Item2 is not null ? $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(node.Ch)}, {Literal(literal.Item2[0])});" : literal.Item3 is null ? $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(node.Ch)}, {Literal(literal.Item1)});" : literal.Item3.Length switch { 2 => $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(node.Ch)}, {Literal(literal.Item3[0])}, {Literal(literal.Item3[1])});", _ => $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(node.Ch + literal.Item3)});", }); using (EmitBlock(writer, $"if ((uint){startingPos} >= (uint){sliceSpan}.Length || {sliceSpan}[{startingPos}] == {Literal(node.Ch)})")) { Goto(doneLabel); } writer.WriteLine($"pos += {startingPos};"); SliceInputSpan(writer); } else if (iterationCount is null && node.Kind is RegexNodeKind.Setlazy && node.Str == RegexCharClass.AnyClass && subsequent?.FindStartingLiteral() is ValueTuple<char, string?, string?> literal2) { // e.g. ".*?string" with RegexOptions.Singleline // This lazy loop will consume all characters until the subsequent literal. If the subsequent literal // isn't found, the loop fails. We can implement it to just search for that literal. writer.WriteLine( literal2.Item2 is not null ? $"{startingPos} = global::System.MemoryExtensions.IndexOf({sliceSpan}, {Literal(literal2.Item2)});" : literal2.Item3 is null ? $"{startingPos} = global::System.MemoryExtensions.IndexOf({sliceSpan}, {Literal(literal2.Item1)});" : literal2.Item3.Length switch { 2 => $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(literal2.Item3[0])}, {Literal(literal2.Item3[1])});", 3 => $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(literal2.Item3[0])}, {Literal(literal2.Item3[1])}, {Literal(literal2.Item3[2])});", _ => $"{startingPos} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}, {Literal(literal2.Item3)});", }); using (EmitBlock(writer, $"if ({startingPos} < 0)")) { Goto(doneLabel); } writer.WriteLine($"pos += {startingPos};"); SliceInputSpan(writer); } // Store the position we've left off at in case we need to iterate again. 
writer.WriteLine($"{startingPos} = pos;"); // Update the done label for everything that comes after this node. This is done after we emit the single char // matching, as that failing indicates the loop itself has failed to match. string originalDoneLabel = doneLabel; doneLabel = backtrackingLabel; // leave set to the backtracking label for all subsequent nodes writer.WriteLine(); MarkLabel(endLoopLabel); if (capturePos is not null) { writer.WriteLine($"{capturePos} = base.Crawlpos();"); } if (node.IsInLoop()) { writer.WriteLine(); // Store the loop's state var toPushPop = new List<string>(3) { startingPos }; if (capturePos is not null) { toPushPop.Add(capturePos); } if (iterationCount is not null) { toPushPop.Add(iterationCount); } string[] toPushPopArray = toPushPop.ToArray(); EmitStackPush(toPushPopArray); // Skip past the backtracking section string end = ReserveName("SkipBacktrack"); Goto(end); writer.WriteLine(); // Emit a backtracking section that restores the loop's state and then jumps to the previous done label string backtrack = ReserveName("CharLazyBacktrack"); MarkLabel(backtrack, emitSemicolon: false); Array.Reverse(toPushPopArray); EmitStackPop(toPushPopArray); Goto(doneLabel); writer.WriteLine(); doneLabel = backtrack; MarkLabel(end); } } void EmitLazy(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Lazyloop, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M < int.MaxValue, $"Unexpected M={node.M}"); Debug.Assert(node.N >= node.M, $"Unexpected M={node.M}, N={node.N}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); int minIterations = node.M; int maxIterations = node.N; string originalDoneLabel = doneLabel; bool isAtomic = analysis.IsAtomicByAncestor(node); // If this is actually an atomic lazy loop, we need to output just the minimum number of iterations, // as nothing will backtrack into the lazy loop to get it progress further. if (isAtomic) { switch (minIterations) { case 0: // Atomic lazy with a min count of 0: nop. return; case 1: // Atomic lazy with a min count of 1: just output the child, no looping required. EmitNode(node.Child(0)); return; } writer.WriteLine(); } // If this is actually a repeater and the child doesn't have any backtracking in it that might // cause us to need to unwind already taken iterations, just output it as a repeater loop. if (minIterations == maxIterations && !analysis.MayBacktrack(node.Child(0))) { EmitNonBacktrackingRepeater(node); return; } // We might loop any number of times. In order to ensure this loop and subsequent code sees sliceStaticPos // the same regardless, we always need it to contain the same value, and the easiest such value is 0. // So, we transfer sliceStaticPos to pos, and ensure that any path out of here has sliceStaticPos as 0. TransferSliceStaticPosToPos(); string startingPos = ReserveName("lazyloop_starting_pos"); string iterationCount = ReserveName("lazyloop_iteration"); string sawEmpty = ReserveName("lazyLoopEmptySeen"); string body = ReserveName("LazyLoopBody"); string endLoop = ReserveName("LazyLoopEnd"); writer.WriteLine($"int {iterationCount} = 0, {startingPos} = pos, {sawEmpty} = 0;"); // If the min count is 0, start out by jumping right to what's after the loop. Backtracking // will then bring us back in to do further iterations. 
if (minIterations == 0) { Goto(endLoop); } writer.WriteLine(); // Iteration body MarkLabel(body, emitSemicolon: false); EmitTimeoutCheck(writer, hasTimeout); // We need to store the starting pos and crawl position so that it may // be backtracked through later. This needs to be the starting position from // the iteration we're leaving, so it's pushed before updating it to pos. EmitStackPush(expressionHasCaptures ? new[] { "base.Crawlpos()", startingPos, "pos", sawEmpty } : new[] { startingPos, "pos", sawEmpty }); writer.WriteLine(); // Save off some state. We need to store the current pos so we can compare it against // pos after the iteration, in order to determine whether the iteration was empty. Empty // iterations are allowed as part of min matches, but once we've met the min quota, empty matches // are considered match failures. writer.WriteLine($"{startingPos} = pos;"); // Proactively increase the number of iterations. We do this prior to the match rather than once // we know it's successful, because we need to decrement it as part of a failed match when // backtracking; it's thus simpler to just always decrement it as part of a failed match, even // when initially greedily matching the loop, which then requires we increment it before trying. writer.WriteLine($"{iterationCount}++;"); // Last but not least, we need to set the doneLabel that a failed match of the body will jump to. // Such an iteration match failure may or may not fail the whole operation, depending on whether // we've already matched the minimum required iterations, so we need to jump to a location that // will make that determination. string iterationFailedLabel = ReserveName("LazyLoopIterationNoMatch"); doneLabel = iterationFailedLabel; // Finally, emit the child. Debug.Assert(sliceStaticPos == 0); EmitNode(node.Child(0)); writer.WriteLine(); TransferSliceStaticPosToPos(); // ensure sliceStaticPos remains 0 if (doneLabel == iterationFailedLabel) { doneLabel = originalDoneLabel; } // Loop condition. Continue iterating if we've not yet reached the minimum. if (minIterations > 0) { using (EmitBlock(writer, $"if ({CountIsLessThan(iterationCount, minIterations)})")) { Goto(body); } } // If the last iteration was empty, we need to prevent further iteration from this point // unless we backtrack out of this iteration. We can do that easily just by pretending // we reached the max iteration count. using (EmitBlock(writer, $"if (pos == {startingPos})")) { writer.WriteLine($"{sawEmpty} = 1;"); } // We matched the next iteration. Jump to the subsequent code. Goto(endLoop); writer.WriteLine(); // Now handle what happens when an iteration fails. We need to reset state to what it was before just that iteration // started. That includes resetting pos and clearing out any captures from that iteration. 
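// Illustrative shape of the generated failure handler (labels and locals approximate): // LazyLoopIterationNoMatch: //   iterationCount--; //   if (iterationCount < 0) goto originalDone; //   pop sawEmpty, pos, and startingPos from the backtracking stack (and uncapture if the pattern has captures); //   re-slice the input, then either fail the loop or resume backtracking into the prior iteration's child.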
MarkLabel(iterationFailedLabel, emitSemicolon: false); writer.WriteLine($"{iterationCount}--;"); using (EmitBlock(writer, $"if ({iterationCount} < 0)")) { Goto(originalDoneLabel); } EmitStackPop(sawEmpty, "pos", startingPos); if (expressionHasCaptures) { EmitUncaptureUntil(StackPop()); } SliceInputSpan(writer); if (doneLabel == originalDoneLabel) { Goto(originalDoneLabel); } else { using (EmitBlock(writer, $"if ({iterationCount} == 0)")) { Goto(originalDoneLabel); } Goto(doneLabel); } writer.WriteLine(); MarkLabel(endLoop); if (!isAtomic) { // Store the capture's state and skip the backtracking section EmitStackPush(startingPos, iterationCount, sawEmpty); string skipBacktrack = ReserveName("SkipBacktrack"); Goto(skipBacktrack); writer.WriteLine(); // Emit a backtracking section that restores the capture's state and then jumps to the previous done label string backtrack = ReserveName($"LazyLoopBacktrack"); MarkLabel(backtrack, emitSemicolon: false); EmitStackPop(sawEmpty, iterationCount, startingPos); if (maxIterations == int.MaxValue) { using (EmitBlock(writer, $"if ({sawEmpty} == 0)")) { Goto(body); } } else { using (EmitBlock(writer, $"if ({CountIsLessThan(iterationCount, maxIterations)} && {sawEmpty} == 0)")) { Goto(body); } } Goto(doneLabel); writer.WriteLine(); doneLabel = backtrack; MarkLabel(skipBacktrack); } } // Emits the code to handle a loop (repeater) with a fixed number of iterations. // RegexNode.M is used for the number of iterations (RegexNode.N is ignored), as this // might be used to implement the required iterations of other kinds of loops. void EmitSingleCharRepeater(RegexNode node, bool emitLengthCheck = true) { Debug.Assert(node.IsOneFamily || node.IsNotoneFamily || node.IsSetFamily, $"Unexpected type: {node.Kind}"); int iterations = node.M; switch (iterations) { case 0: // No iterations, nothing to do. return; case 1: // Just match the individual item EmitSingleChar(node, emitLengthCheck); return; case <= RegexNode.MultiVsRepeaterLimit when node.IsOneFamily && !IsCaseInsensitive(node): // This is a repeated case-sensitive character; emit it as a multi in order to get all the optimizations // afforded to a multi, e.g. unrolling the loop with multi-char reads/comparisons at a time. EmitMultiCharString(new string(node.Ch, iterations), caseInsensitive: false, emitLengthCheck); return; } if (iterations <= MaxUnrollSize) { // if ((uint)(sliceStaticPos + iterations - 1) >= (uint)slice.Length || // slice[sliceStaticPos] != c1 || // slice[sliceStaticPos + 1] != c2 || // ...) 
// { // goto doneLabel; // } writer.Write($"if ("); if (emitLengthCheck) { writer.WriteLine($"{SpanLengthCheck(iterations)} ||"); writer.Write(" "); } EmitSingleChar(node, emitLengthCheck: false, clauseOnly: true); for (int i = 1; i < iterations; i++) { writer.WriteLine(" ||"); writer.Write(" "); EmitSingleChar(node, emitLengthCheck: false, clauseOnly: true); } writer.WriteLine(")"); using (EmitBlock(writer, null)) { Goto(doneLabel); } } else { // if ((uint)(sliceStaticPos + iterations - 1) >= (uint)slice.Length) goto doneLabel; if (emitLengthCheck) { EmitSpanLengthCheck(iterations); } string repeaterSpan = "repeaterSlice"; // As this repeater doesn't wrap arbitrary node emits, this shouldn't conflict with anything writer.WriteLine($"global::System.ReadOnlySpan<char> {repeaterSpan} = {sliceSpan}.Slice({sliceStaticPos}, {iterations});"); using (EmitBlock(writer, $"for (int i = 0; i < {repeaterSpan}.Length; i++)")) { EmitTimeoutCheck(writer, hasTimeout); string tmpTextSpanLocal = sliceSpan; // we want EmitSingleChar to refer to this temporary int tmpSliceStaticPos = sliceStaticPos; sliceSpan = repeaterSpan; sliceStaticPos = 0; EmitSingleChar(node, emitLengthCheck: false, offset: "i"); sliceSpan = tmpTextSpanLocal; sliceStaticPos = tmpSliceStaticPos; } sliceStaticPos += iterations; } } // Emits the code to handle a non-backtracking, variable-length loop around a single character comparison. void EmitSingleCharAtomicLoop(RegexNode node, bool emitLengthChecksIfRequired = true) { Debug.Assert(node.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic, $"Unexpected type: {node.Kind}"); // If this is actually a repeater, emit that instead. if (node.M == node.N) { EmitSingleCharRepeater(node, emitLengthChecksIfRequired); return; } // If this is actually an optional single char, emit that instead. if (node.M == 0 && node.N == 1) { EmitAtomicSingleCharZeroOrOne(node); return; } Debug.Assert(node.N > node.M); int minIterations = node.M; int maxIterations = node.N; Span<char> setChars = stackalloc char[5]; // 5 is max optimized by IndexOfAny today int numSetChars = 0; string iterationLocal = ReserveName("iteration"); if (node.IsNotoneFamily && maxIterations == int.MaxValue && (!IsCaseInsensitive(node))) { // For Notone, we're looking for a specific character, as everything until we find // it is consumed by the loop. If we're unbounded, such as with ".*" and if we're case-sensitive, // we can use the vectorized IndexOf to do the search, rather than open-coding it. The unbounded // restriction is purely for simplicity; it could be removed in the future with additional code to // handle the unbounded case. writer.Write($"int {iterationLocal} = global::System.MemoryExtensions.IndexOf({sliceSpan}"); if (sliceStaticPos > 0) { writer.Write($".Slice({sliceStaticPos})"); } writer.WriteLine($", {Literal(node.Ch)});"); using (EmitBlock(writer, $"if ({iterationLocal} < 0)")) { writer.WriteLine(sliceStaticPos > 0 ? 
$"{iterationLocal} = {sliceSpan}.Length - {sliceStaticPos};" : $"{iterationLocal} = {sliceSpan}.Length;"); } writer.WriteLine(); } else if (node.IsSetFamily && maxIterations == int.MaxValue && !IsCaseInsensitive(node) && (numSetChars = RegexCharClass.GetSetChars(node.Str!, setChars)) != 0 && RegexCharClass.IsNegated(node.Str!)) { // If the set is negated and contains only a few characters (if it contained 1 and was negated, it should // have been reduced to a Notone), we can use an IndexOfAny to find any of the target characters. // As with the notoneloopatomic above, the unbounded constraint is purely for simplicity. Debug.Assert(numSetChars > 1); writer.Write($"int {iterationLocal} = global::System.MemoryExtensions.IndexOfAny({sliceSpan}"); if (sliceStaticPos != 0) { writer.Write($".Slice({sliceStaticPos})"); } writer.WriteLine(numSetChars switch { 2 => $", {Literal(setChars[0])}, {Literal(setChars[1])});", 3 => $", {Literal(setChars[0])}, {Literal(setChars[1])}, {Literal(setChars[2])});", _ => $", {Literal(setChars.Slice(0, numSetChars).ToString())});", }); using (EmitBlock(writer, $"if ({iterationLocal} < 0)")) { writer.WriteLine(sliceStaticPos > 0 ? $"{iterationLocal} = {sliceSpan}.Length - {sliceStaticPos};" : $"{iterationLocal} = {sliceSpan}.Length;"); } writer.WriteLine(); } else if (node.IsSetFamily && maxIterations == int.MaxValue && node.Str == RegexCharClass.AnyClass) { // .* was used with RegexOptions.Singleline, which means it'll consume everything. Just jump to the end. // The unbounded constraint is the same as in the Notone case above, done purely for simplicity. TransferSliceStaticPosToPos(); writer.WriteLine($"int {iterationLocal} = inputSpan.Length - pos;"); } else { // For everything else, do a normal loop. string expr = $"{sliceSpan}[{iterationLocal}]"; if (node.IsSetFamily) { expr = MatchCharacterClass(hasTextInfo, options, expr, node.Str!, IsCaseInsensitive(node), negate: false, additionalDeclarations, ref requiredHelpers); } else { expr = ToLowerIfNeeded(hasTextInfo, options, expr, IsCaseInsensitive(node)); expr = $"{expr} {(node.IsOneFamily ? "==" : "!=")} {Literal(node.Ch)}"; } if (minIterations != 0 || maxIterations != int.MaxValue) { // For any loops other than * loops, transfer text pos to pos in // order to zero it out to be able to use the single iteration variable // for both iteration count and indexer. TransferSliceStaticPosToPos(); } writer.WriteLine($"int {iterationLocal} = {sliceStaticPos};"); sliceStaticPos = 0; string maxClause = maxIterations != int.MaxValue ? $"{CountIsLessThan(iterationLocal, maxIterations)} && " : ""; using (EmitBlock(writer, $"while ({maxClause}(uint){iterationLocal} < (uint){sliceSpan}.Length && {expr})")) { EmitTimeoutCheck(writer, hasTimeout); writer.WriteLine($"{iterationLocal}++;"); } writer.WriteLine(); } // Check to ensure we've found at least min iterations. if (minIterations > 0) { using (EmitBlock(writer, $"if ({CountIsLessThan(iterationLocal, minIterations)})")) { Goto(doneLabel); } writer.WriteLine(); } // Now that we've completed our optional iterations, advance the text span // and pos by the number of iterations completed. writer.WriteLine($"{sliceSpan} = {sliceSpan}.Slice({iterationLocal});"); writer.WriteLine($"pos += {iterationLocal};"); } // Emits the code to handle a non-backtracking optional zero-or-one loop. 
void EmitAtomicSingleCharZeroOrOne(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M == 0 && node.N == 1); string expr = $"{sliceSpan}[{sliceStaticPos}]"; if (node.IsSetFamily) { expr = MatchCharacterClass(hasTextInfo, options, expr, node.Str!, IsCaseInsensitive(node), negate: false, additionalDeclarations, ref requiredHelpers); } else { expr = ToLowerIfNeeded(hasTextInfo, options, expr, IsCaseInsensitive(node)); expr = $"{expr} {(node.IsOneFamily ? "==" : "!=")} {Literal(node.Ch)}"; } string spaceAvailable = sliceStaticPos != 0 ? $"(uint){sliceSpan}.Length > (uint){sliceStaticPos}" : $"!{sliceSpan}.IsEmpty"; using (EmitBlock(writer, $"if ({spaceAvailable} && {expr})")) { writer.WriteLine($"{sliceSpan} = {sliceSpan}.Slice(1);"); writer.WriteLine($"pos++;"); } } void EmitNonBacktrackingRepeater(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M < int.MaxValue, $"Unexpected M={node.M}"); Debug.Assert(node.M == node.N, $"Unexpected M={node.M} == N={node.N}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); Debug.Assert(!analysis.MayBacktrack(node.Child(0)), $"Expected non-backtracking node {node.Kind}"); // Ensure every iteration of the loop sees a consistent value. TransferSliceStaticPosToPos(); // Loop M==N times to match the child exactly that numbers of times. string i = ReserveName("loop_iteration"); using (EmitBlock(writer, $"for (int {i} = 0; {i} < {node.M}; {i}++)")) { EmitNode(node.Child(0)); TransferSliceStaticPosToPos(); // make sure static the static position remains at 0 for subsequent constructs } } void EmitLoop(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M < int.MaxValue, $"Unexpected M={node.M}"); Debug.Assert(node.N >= node.M, $"Unexpected M={node.M}, N={node.N}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); int minIterations = node.M; int maxIterations = node.N; bool isAtomic = analysis.IsAtomicByAncestor(node); // If this is actually a repeater and the child doesn't have any backtracking in it that might // cause us to need to unwind already taken iterations, just output it as a repeater loop. if (minIterations == maxIterations && !analysis.MayBacktrack(node.Child(0))) { EmitNonBacktrackingRepeater(node); return; } // We might loop any number of times. In order to ensure this loop and subsequent code sees sliceStaticPos // the same regardless, we always need it to contain the same value, and the easiest such value is 0. // So, we transfer sliceStaticPos to pos, and ensure that any path out of here has sliceStaticPos as 0. 
TransferSliceStaticPosToPos(); string originalDoneLabel = doneLabel; string startingPos = ReserveName("loop_starting_pos"); string iterationCount = ReserveName("loop_iteration"); string body = ReserveName("LoopBody"); string endLoop = ReserveName("LoopEnd"); additionalDeclarations.Add($"int {iterationCount} = 0, {startingPos} = 0;"); writer.WriteLine($"{iterationCount} = 0;"); writer.WriteLine($"{startingPos} = pos;"); writer.WriteLine(); // Iteration body MarkLabel(body, emitSemicolon: false); EmitTimeoutCheck(writer, hasTimeout); // We need to store the starting pos and crawl position so that it may // be backtracked through later. This needs to be the starting position from // the iteration we're leaving, so it's pushed before updating it to pos. EmitStackPush(expressionHasCaptures ? new[] { "base.Crawlpos()", startingPos, "pos" } : new[] { startingPos, "pos" }); writer.WriteLine(); // Save off some state. We need to store the current pos so we can compare it against // pos after the iteration, in order to determine whether the iteration was empty. Empty // iterations are allowed as part of min matches, but once we've met the min quote, empty matches // are considered match failures. writer.WriteLine($"{startingPos} = pos;"); // Proactively increase the number of iterations. We do this prior to the match rather than once // we know it's successful, because we need to decrement it as part of a failed match when // backtracking; it's thus simpler to just always decrement it as part of a failed match, even // when initially greedily matching the loop, which then requires we increment it before trying. writer.WriteLine($"{iterationCount}++;"); writer.WriteLine(); // Last but not least, we need to set the doneLabel that a failed match of the body will jump to. // Such an iteration match failure may or may not fail the whole operation, depending on whether // we've already matched the minimum required iterations, so we need to jump to a location that // will make that determination. string iterationFailedLabel = ReserveName("LoopIterationNoMatch"); doneLabel = iterationFailedLabel; // Finally, emit the child. Debug.Assert(sliceStaticPos == 0); EmitNode(node.Child(0)); writer.WriteLine(); TransferSliceStaticPosToPos(); // ensure sliceStaticPos remains 0 bool childBacktracks = doneLabel != iterationFailedLabel; // Loop condition. Continue iterating greedily if we've not yet reached the maximum. We also need to stop // iterating if the iteration matched empty and we already hit the minimum number of iterations. using (EmitBlock(writer, (minIterations > 0, maxIterations == int.MaxValue) switch { (true, true) => $"if (pos != {startingPos} || {CountIsLessThan(iterationCount, minIterations)})", (true, false) => $"if ((pos != {startingPos} || {CountIsLessThan(iterationCount, minIterations)}) && {CountIsLessThan(iterationCount, maxIterations)})", (false, true) => $"if (pos != {startingPos})", (false, false) => $"if (pos != {startingPos} && {CountIsLessThan(iterationCount, maxIterations)})", })) { Goto(body); } // We've matched as many iterations as we can with this configuration. Jump to what comes after the loop. Goto(endLoop); writer.WriteLine(); // Now handle what happens when an iteration fails, which could be an initial failure or it // could be while backtracking. We need to reset state to what it was before just that iteration // started. That includes resetting pos and clearing out any captures from that iteration. 
MarkLabel(iterationFailedLabel, emitSemicolon: false); writer.WriteLine($"{iterationCount}--;"); using (EmitBlock(writer, $"if ({iterationCount} < 0)")) { Goto(originalDoneLabel); } EmitStackPop("pos", startingPos); if (expressionHasCaptures) { EmitUncaptureUntil(StackPop()); } SliceInputSpan(writer); if (minIterations > 0) { using (EmitBlock(writer, $"if ({iterationCount} == 0)")) { Goto(originalDoneLabel); } using (EmitBlock(writer, $"if ({CountIsLessThan(iterationCount, minIterations)})")) { Goto(childBacktracks ? doneLabel : originalDoneLabel); } } if (isAtomic) { doneLabel = originalDoneLabel; MarkLabel(endLoop); } else { if (childBacktracks) { Goto(endLoop); writer.WriteLine(); string backtrack = ReserveName("LoopBacktrack"); MarkLabel(backtrack, emitSemicolon: false); using (EmitBlock(writer, $"if ({iterationCount} == 0)")) { Goto(originalDoneLabel); } Goto(doneLabel); doneLabel = backtrack; } MarkLabel(endLoop); if (node.IsInLoop()) { writer.WriteLine(); // Store the loop's state EmitStackPush(startingPos, iterationCount); // Skip past the backtracking section string end = ReserveName("SkipBacktrack"); Goto(end); writer.WriteLine(); // Emit a backtracking section that restores the loop's state and then jumps to the previous done label string backtrack = ReserveName("LoopBacktrack"); MarkLabel(backtrack, emitSemicolon: false); EmitStackPop(iterationCount, startingPos); Goto(doneLabel); writer.WriteLine(); doneLabel = backtrack; MarkLabel(end); } } } // Gets a comparison for whether the value is less than the upper bound. static string CountIsLessThan(string value, int exclusiveUpper) => exclusiveUpper == 1 ? $"{value} == 0" : $"{value} < {exclusiveUpper}"; // Emits code to unwind the capture stack until the crawl position specified in the provided local. void EmitUncaptureUntil(string capturepos) { string name = "UncaptureUntil"; if (!additionalLocalFunctions.ContainsKey(name)) { var lines = new string[9]; lines[0] = "// <summary>Undo captures until we reach the specified capture position.</summary>"; lines[1] = "[global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)]"; lines[2] = $"void {name}(int capturepos)"; lines[3] = "{"; lines[4] = " while (base.Crawlpos() > capturepos)"; lines[5] = " {"; lines[6] = " base.Uncapture();"; lines[7] = " }"; lines[8] = "}"; additionalLocalFunctions.Add(name, lines); } writer.WriteLine($"{name}({capturepos});"); } /// <summary>Pushes values on to the backtracking stack.</summary> void EmitStackPush(params string[] args) { Debug.Assert(args.Length is >= 1); string function = $"StackPush{args.Length}"; additionalDeclarations.Add("int stackpos = 0;"); if (!additionalLocalFunctions.ContainsKey(function)) { var lines = new string[24 + args.Length]; lines[0] = $"// <summary>Push {args.Length} value{(args.Length == 1 ? "" : "s")} onto the backtracking stack.</summary>"; lines[1] = $"[global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)]"; lines[2] = $"static void {function}(ref int[] stack, ref int pos{FormatN(", int arg{0}", args.Length)})"; lines[3] = $"{{"; lines[4] = $" // If there's space available for {(args.Length > 1 ? $"all {args.Length} values, store them" : "the value, store it")}."; lines[5] = $" int[] s = stack;"; lines[6] = $" int p = pos;"; lines[7] = $" if ((uint){(args.Length > 1 ? 
$"(p + {args.Length - 1})" : "p")} < (uint)s.Length)"; lines[8] = $" {{"; for (int i = 0; i < args.Length; i++) { lines[9 + i] = $" s[p{(i == 0 ? "" : $" + {i}")}] = arg{i};"; } lines[9 + args.Length] = args.Length > 1 ? $" pos += {args.Length};" : " pos++;"; lines[10 + args.Length] = $" return;"; lines[11 + args.Length] = $" }}"; lines[12 + args.Length] = $""; lines[13 + args.Length] = $" // Otherwise, resize the stack to make room and try again."; lines[14 + args.Length] = $" WithResize(ref stack, ref pos{FormatN(", arg{0}", args.Length)});"; lines[15 + args.Length] = $""; lines[16 + args.Length] = $" // <summary>Resize the backtracking stack array and push {args.Length} value{(args.Length == 1 ? "" : "s")} onto the stack.</summary>"; lines[17 + args.Length] = $" [global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.NoInlining)]"; lines[18 + args.Length] = $" static void WithResize(ref int[] stack, ref int pos{FormatN(", int arg{0}", args.Length)})"; lines[19 + args.Length] = $" {{"; lines[20 + args.Length] = $" global::System.Array.Resize(ref stack, (pos + {args.Length - 1}) * 2);"; lines[21 + args.Length] = $" {function}(ref stack, ref pos{FormatN(", arg{0}", args.Length)});"; lines[22 + args.Length] = $" }}"; lines[23 + args.Length] = $"}}"; additionalLocalFunctions.Add(function, lines); } writer.WriteLine($"{function}(ref base.runstack!, ref stackpos, {string.Join(", ", args)});"); } /// <summary>Pops values from the backtracking stack into the specified locations.</summary> void EmitStackPop(params string[] args) { Debug.Assert(args.Length is >= 1); if (args.Length == 1) { writer.WriteLine($"{args[0]} = {StackPop()};"); return; } string function = $"StackPop{args.Length}"; if (!additionalLocalFunctions.ContainsKey(function)) { var lines = new string[5 + args.Length]; lines[0] = $"// <summary>Pop {args.Length} value{(args.Length == 1 ? "" : "s")} from the backtracking stack.</summary>"; lines[1] = $"[global::System.Runtime.CompilerServices.MethodImpl(global::System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)]"; lines[2] = $"static void {function}(int[] stack, ref int pos{FormatN(", out int arg{0}", args.Length)})"; lines[3] = $"{{"; for (int i = 0; i < args.Length; i++) { lines[4 + i] = $" arg{i} = stack[--pos];"; } lines[4 + args.Length] = $"}}"; additionalLocalFunctions.Add(function, lines); } writer.WriteLine($"{function}(base.runstack, ref stackpos, out {string.Join(", out ", args)});"); } /// <summary>Expression for popping the next item from the backtracking stack.</summary> string StackPop() => "base.runstack![--stackpos]"; /// <summary>Concatenates the strings resulting from formatting the format string with the values [0, count).</summary> static string FormatN(string format, int count) => string.Concat(from i in Enumerable.Range(0, count) select string.Format(format, i)); } private static bool EmitLoopTimeoutCounterIfNeeded(IndentedTextWriter writer, RegexMethod rm) { if (rm.MatchTimeout != Timeout.Infinite) { writer.WriteLine("int loopTimeoutCounter = 0;"); return true; } return false; } /// <summary>Emits a timeout check.</summary> private static void EmitTimeoutCheck(IndentedTextWriter writer, bool hasTimeout) { const int LoopTimeoutCheckCount = 2048; // A conservative value to guarantee the correct timeout handling. if (hasTimeout) { // Increment counter for each loop iteration. // Emit code to check the timeout every 2048th iteration. 
using (EmitBlock(writer, $"if (++loopTimeoutCounter == {LoopTimeoutCheckCount})")) { writer.WriteLine("loopTimeoutCounter = 0;"); writer.WriteLine("base.CheckTimeout();"); } writer.WriteLine(); } } private static bool EmitInitializeCultureForTryMatchAtCurrentPositionIfNecessary(IndentedTextWriter writer, RegexMethod rm, AnalysisResults analysis) { if (analysis.HasIgnoreCase && ((RegexOptions)rm.Options & RegexOptions.CultureInvariant) == 0) { writer.WriteLine("global::System.Globalization.TextInfo textInfo = global::System.Globalization.CultureInfo.CurrentCulture.TextInfo;"); return true; } return false; } private static bool UseToLowerInvariant(bool hasTextInfo, RegexOptions options) => !hasTextInfo || (options & RegexOptions.CultureInvariant) != 0; private static string ToLower(bool hasTextInfo, RegexOptions options, string expression) => UseToLowerInvariant(hasTextInfo, options) ? $"char.ToLowerInvariant({expression})" : $"textInfo.ToLower({expression})"; private static string ToLowerIfNeeded(bool hasTextInfo, RegexOptions options, string expression, bool toLower) => toLower ? ToLower(hasTextInfo, options, expression) : expression; private static string MatchCharacterClass(bool hasTextInfo, RegexOptions options, string chExpr, string charClass, bool caseInsensitive, bool negate, HashSet<string> additionalDeclarations, ref RequiredHelperFunctions requiredHelpers) { // We need to perform the equivalent of calling RegexRunner.CharInClass(ch, charClass), // but that call is relatively expensive. Before we fall back to it, we try to optimize // some common cases for which we can do much better, such as known character classes // for which we can call a dedicated method, or a fast-path for ASCII using a lookup table. // First, see if the char class is a built-in one for which there's a better function // we can just call directly. Everything in this section must work correctly for both // case-sensitive and case-insensitive modes, regardless of culture. switch (charClass) { case RegexCharClass.AnyClass: // ideally this could just be "return true;", but we need to evaluate the expression for its side effects return $"({chExpr} {(negate ? "<" : ">=")} 0)"; // a char is unsigned and thus won't ever be negative case RegexCharClass.DigitClass: case RegexCharClass.NotDigitClass: negate ^= charClass == RegexCharClass.NotDigitClass; return $"{(negate ? "!" : "")}char.IsDigit({chExpr})"; case RegexCharClass.SpaceClass: case RegexCharClass.NotSpaceClass: negate ^= charClass == RegexCharClass.NotSpaceClass; return $"{(negate ? "!" : "")}char.IsWhiteSpace({chExpr})"; case RegexCharClass.WordClass: case RegexCharClass.NotWordClass: requiredHelpers |= RequiredHelperFunctions.IsWordChar; negate ^= charClass == RegexCharClass.NotWordClass; return $"{(negate ? "!" : "")}IsWordChar({chExpr})"; } // If we're meant to be doing a case-insensitive lookup, and if we're not using the invariant culture, // lowercase the input. If we're using the invariant culture, we may still end up calling ToLower later // on, but we may also be able to avoid it, in particular in the case of our lookup table, where we can // generate the lookup table already factoring in the invariant case sensitivity. There are multiple // special-code paths between here and the lookup table, but we only take those if invariant is false; // if it were true, they'd need to use CallToLower(). 
bool invariant = false; if (caseInsensitive) { invariant = UseToLowerInvariant(hasTextInfo, options); if (!invariant) { chExpr = ToLower(hasTextInfo, options, chExpr); } } // Next, handle simple sets of one range, e.g. [A-Z], [0-9], etc. This includes some built-in classes, like ECMADigitClass. if (!invariant && RegexCharClass.TryGetSingleRange(charClass, out char lowInclusive, out char highInclusive)) { negate ^= RegexCharClass.IsNegated(charClass); return lowInclusive == highInclusive ? $"({chExpr} {(negate ? "!=" : "==")} {Literal(lowInclusive)})" : $"(((uint){chExpr}) - {Literal(lowInclusive)} {(negate ? ">" : "<=")} (uint)({Literal(highInclusive)} - {Literal(lowInclusive)}))"; } // Next if the character class contains nothing but a single Unicode category, we can calle char.GetUnicodeCategory and // compare against it. It has a fast-lookup path for ASCII, so is as good or better than any lookup we'd generate (plus // we get smaller code), and it's what we'd do for the fallback (which we get to avoid generating) as part of CharInClass. if (!invariant && RegexCharClass.TryGetSingleUnicodeCategory(charClass, out UnicodeCategory category, out bool negated)) { negate ^= negated; return $"(char.GetUnicodeCategory({chExpr}) {(negate ? "!=" : "==")} global::System.Globalization.UnicodeCategory.{category})"; } // Next, if there's only 2 or 3 chars in the set (fairly common due to the sets we create for prefixes), // it may be cheaper and smaller to compare against each than it is to use a lookup table. We can also special-case // the very common case with case insensitivity of two characters next to each other being the upper and lowercase // ASCII variants of each other, in which case we can use bit manipulation to avoid a comparison. if (!invariant && !RegexCharClass.IsNegated(charClass)) { Span<char> setChars = stackalloc char[3]; int mask; switch (RegexCharClass.GetSetChars(charClass, setChars)) { case 2: if (RegexCharClass.DifferByOneBit(setChars[0], setChars[1], out mask)) { return $"(({chExpr} | 0x{mask:X}) {(negate ? "!=" : "==")} {Literal((char)(setChars[1] | mask))})"; } additionalDeclarations.Add("char ch;"); return negate ? $"(((ch = {chExpr}) != {Literal(setChars[0])}) & (ch != {Literal(setChars[1])}))" : $"(((ch = {chExpr}) == {Literal(setChars[0])}) | (ch == {Literal(setChars[1])}))"; case 3: additionalDeclarations.Add("char ch;"); return (negate, RegexCharClass.DifferByOneBit(setChars[0], setChars[1], out mask)) switch { (false, false) => $"(((ch = {chExpr}) == {Literal(setChars[0])}) | (ch == {Literal(setChars[1])}) | (ch == {Literal(setChars[2])}))", (true, false) => $"(((ch = {chExpr}) != {Literal(setChars[0])}) & (ch != {Literal(setChars[1])}) & (ch != {Literal(setChars[2])}))", (false, true) => $"((((ch = {chExpr}) | 0x{mask:X}) == {Literal((char)(setChars[1] | mask))}) | (ch == {Literal(setChars[2])}))", (true, true) => $"((((ch = {chExpr}) | 0x{mask:X}) != {Literal((char)(setChars[1] | mask))}) & (ch != {Literal(setChars[2])}))", }; } } // All options after this point require a ch local. additionalDeclarations.Add("char ch;"); // Analyze the character set more to determine what code to generate. 
RegexCharClass.CharClassAnalysisResults analysis = RegexCharClass.Analyze(charClass); if (!invariant) // if we're being asked to do a case insensitive, invariant comparison, use the lookup table { if (analysis.ContainsNoAscii) { // We determined that the character class contains only non-ASCII, // for example if the class were [\p{IsGreek}\p{IsGreekExtended}], which is // the same as [\u0370-\u03FF\u1F00-1FFF]. (In the future, we could possibly // extend the analysis to produce a known lower-bound and compare against // that rather than always using 128 as the pivot point.) return negate ? $"((ch = {chExpr}) < 128 || !global::System.Text.RegularExpressions.RegexRunner.CharInClass((char)ch, {Literal(charClass)}))" : $"((ch = {chExpr}) >= 128 && global::System.Text.RegularExpressions.RegexRunner.CharInClass((char)ch, {Literal(charClass)}))"; } if (analysis.AllAsciiContained) { // We determined that every ASCII character is in the class, for example // if the class were the negated example from case 1 above: // [^\p{IsGreek}\p{IsGreekExtended}]. return negate ? $"((ch = {chExpr}) >= 128 && !global::System.Text.RegularExpressions.RegexRunner.CharInClass((char)ch, {Literal(charClass)}))" : $"((ch = {chExpr}) < 128 || global::System.Text.RegularExpressions.RegexRunner.CharInClass((char)ch, {Literal(charClass)}))"; } } // Now, our big hammer is to generate a lookup table that lets us quickly index by character into a yes/no // answer as to whether the character is in the target character class. However, we don't want to store // a lookup table for every possible character for every character class in the regular expression; at one // bit for each of 65K characters, that would be an 8K bitmap per character class. Instead, we handle the // common case of ASCII input via such a lookup table, which at one bit for each of 128 characters is only // 16 bytes per character class. We of course still need to be able to handle inputs that aren't ASCII, so // we check the input against 128, and have a fallback if the input is >= to it. Determining the right // fallback could itself be expensive. For example, if it's possible that a value >= 128 could match the // character class, we output a call to RegexRunner.CharInClass, but we don't want to have to enumerate the // entire character class evaluating every character against it, just to determine whether it's a match. // Instead, we employ some quick heuristics that will always ensure we provide a correct answer even if // we could have sometimes generated better code to give that answer. // Generate the lookup table to store 128 answers as bits. We use a const string instead of a byte[] / static // data property because it lets IL emit handle all the details for us. string bitVectorString = StringExtensions.Create(8, (charClass, invariant), static (dest, state) => // String length is 8 chars == 16 bytes == 128 bits. { for (int i = 0; i < 128; i++) { char c = (char)i; bool isSet = state.invariant ? RegexCharClass.CharInClass(char.ToLowerInvariant(c), state.charClass) : RegexCharClass.CharInClass(c, state.charClass); if (isSet) { dest[i >> 4] |= (char)(1 << (i & 0xF)); } } }); // We determined that the character class may contain ASCII, so we // output the lookup against the lookup table. if (analysis.ContainsOnlyAscii) { // We know that all inputs that could match are ASCII, for example if the // character class were [A-Za-z0-9], so since the ch is now known to be >= 128, we // can just fail the comparison. return negate ? 
$"((ch = {chExpr}) >= 128 || ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) == 0)" : $"((ch = {chExpr}) < 128 && ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) != 0)"; } if (analysis.AllNonAsciiContained) { // We know that all non-ASCII inputs match, for example if the character // class were [^\r\n], so since we just determined the ch to be >= 128, we can just // give back success. return negate ? $"((ch = {chExpr}) < 128 && ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) == 0)" : $"((ch = {chExpr}) >= 128 || ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) != 0)"; } // We know that the whole class wasn't ASCII, and we don't know anything about the non-ASCII // characters other than that some might be included, for example if the character class // were [\w\d], so since ch >= 128, we need to fall back to calling CharInClass. return (negate, invariant) switch { (false, false) => $"((ch = {chExpr}) < 128 ? ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) != 0 : global::System.Text.RegularExpressions.RegexRunner.CharInClass((char)ch, {Literal(charClass)}))", (true, false) => $"((ch = {chExpr}) < 128 ? ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) == 0 : !global::System.Text.RegularExpressions.RegexRunner.CharInClass((char)ch, {Literal(charClass)}))", (false, true) => $"((ch = {chExpr}) < 128 ? ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) != 0 : global::System.Text.RegularExpressions.RegexRunner.CharInClass(char.ToLowerInvariant((char)ch), {Literal(charClass)}))", (true, true) => $"((ch = {chExpr}) < 128 ? ({Literal(bitVectorString)}[ch >> 4] & (1 << (ch & 0xF))) == 0 : !global::System.Text.RegularExpressions.RegexRunner.CharInClass(char.ToLowerInvariant((char)ch), {Literal(charClass)}))", }; } /// <summary> /// Replaces <see cref="AdditionalDeclarationsPlaceholder"/> in <paramref name="writer"/> with /// all of the variable declarations in <paramref name="declarations"/>. /// </summary> /// <param name="writer">The writer around a StringWriter to have additional declarations inserted into.</param> /// <param name="declarations">The additional declarations to insert.</param> /// <param name="position">The position into the writer at which to insert the additional declarations.</param> /// <param name="indent">The indentation to use for the additional declarations.</param> private static void ReplaceAdditionalDeclarations(IndentedTextWriter writer, HashSet<string> declarations, int position, int indent) { if (declarations.Count != 0) { var tmp = new StringBuilder(); foreach (string decl in declarations.OrderBy(s => s)) { for (int i = 0; i < indent; i++) { tmp.Append(IndentedTextWriter.DefaultTabString); } tmp.AppendLine(decl); } ((StringWriter)writer.InnerWriter).GetStringBuilder().Insert(position, tmp.ToString()); } } /// <summary>Formats the character as valid C#.</summary> private static string Literal(char c) => SymbolDisplay.FormatLiteral(c, quote: true); /// <summary>Formats the string as valid C#.</summary> private static string Literal(string s) => SymbolDisplay.FormatLiteral(s, quote: true); private static string Literal(RegexOptions options) { string s = options.ToString(); if (int.TryParse(s, out _)) { // The options were formatted as an int, which means the runtime couldn't // produce a textual representation. So just output casting the value as an int. 
return $"(global::System.Text.RegularExpressions.RegexOptions)({(int)options})"; } // Parse the runtime-generated "Option1, Option2" into each piece and then concat // them back together. string[] parts = s.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries); for (int i = 0; i < parts.Length; i++) { parts[i] = "global::System.Text.RegularExpressions.RegexOptions." + parts[i].Trim(); } return string.Join(" | ", parts); } /// <summary>Gets a textual description of the node fit for rendering in a comment in source.</summary> private static string DescribeNode(RegexNode node, AnalysisResults analysis) => node.Kind switch { RegexNodeKind.Alternate => $"Match with {node.ChildCount()} alternative expressions{(analysis.IsAtomicByAncestor(node) ? ", atomically" : "")}.", RegexNodeKind.Atomic => $"Atomic group.", RegexNodeKind.Beginning => "Match if at the beginning of the string.", RegexNodeKind.Bol => "Match if at the beginning of a line.", RegexNodeKind.Boundary => $"Match if at a word boundary.", RegexNodeKind.Capture when node.M == -1 && node.N != -1 => $"Non-capturing balancing group. Uncaptures the {DescribeCapture(node.N, analysis)}.", RegexNodeKind.Capture when node.N != -1 => $"Balancing group. Captures the {DescribeCapture(node.M, analysis)} and uncaptures the {DescribeCapture(node.N, analysis)}.", RegexNodeKind.Capture when node.N == -1 => $"{DescribeCapture(node.M, analysis)}.", RegexNodeKind.Concatenate => "Match a sequence of expressions.", RegexNodeKind.ECMABoundary => $"Match if at a word boundary (according to ECMAScript rules).", RegexNodeKind.Empty => $"Match an empty string.", RegexNodeKind.End => "Match if at the end of the string.", RegexNodeKind.EndZ => "Match if at the end of the string or if before an ending newline.", RegexNodeKind.Eol => "Match if at the end of a line.", RegexNodeKind.Loop or RegexNodeKind.Lazyloop => node.M == 0 && node.N == 1 ? $"Optional ({(node.Kind is RegexNodeKind.Loop ? "greedy" : "lazy")})." 
: $"Loop {DescribeLoop(node, analysis)}.", RegexNodeKind.Multi => $"Match the string {Literal(node.Str!)}.", RegexNodeKind.NonBoundary => $"Match if at anything other than a word boundary.", RegexNodeKind.NonECMABoundary => $"Match if at anything other than a word boundary (according to ECMAScript rules).", RegexNodeKind.Nothing => $"Fail to match.", RegexNodeKind.Notone => $"Match any character other than {Literal(node.Ch)}.", RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Notonelazy => $"Match a character other than {Literal(node.Ch)} {DescribeLoop(node, analysis)}.", RegexNodeKind.One => $"Match {Literal(node.Ch)}.", RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy => $"Match {Literal(node.Ch)} {DescribeLoop(node, analysis)}.", RegexNodeKind.NegativeLookaround => $"Zero-width negative lookahead assertion.", RegexNodeKind.Backreference => $"Match the same text as matched by the {DescribeCapture(node.M, analysis)}.", RegexNodeKind.PositiveLookaround => $"Zero-width positive lookahead assertion.", RegexNodeKind.Set => $"Match {DescribeSet(node.Str!)}.", RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy => $"Match {DescribeSet(node.Str!)} {DescribeLoop(node, analysis)}.", RegexNodeKind.Start => "Match if at the start position.", RegexNodeKind.ExpressionConditional => $"Conditionally match one of two expressions depending on whether an initial expression matches.", RegexNodeKind.BackreferenceConditional => $"Conditionally match one of two expressions depending on whether the {DescribeCapture(node.M, analysis)} matched.", RegexNodeKind.UpdateBumpalong => $"Advance the next matching position.", _ => $"Unknown node type {node.Kind}", }; /// <summary>Gets an identifer to describe a capture group.</summary> private static string DescribeCapture(int capNum, AnalysisResults analysis) { // If we can get a capture name from the captures collection and it's not just a numerical representation of the group, use it. string name = RegexParser.GroupNameFromNumber(analysis.RegexTree.CaptureNumberSparseMapping, analysis.RegexTree.CaptureNames, analysis.RegexTree.CaptureCount, capNum); if (!string.IsNullOrEmpty(name) && (!int.TryParse(name, out int id) || id != capNum)) { name = Literal(name); } else { // Otherwise, create a numerical description of the capture group. int tens = capNum % 10; name = tens is >= 1 and <= 3 && capNum % 100 is < 10 or > 20 ? 
// Ends in 1, 2, 3 but not 11, 12, or 13 tens switch { 1 => $"{capNum}st", 2 => $"{capNum}nd", _ => $"{capNum}rd", } : $"{capNum}th"; } return $"{name} capture group"; } /// <summary>Gets a textual description of what characters match a set.</summary> private static string DescribeSet(string charClass) => charClass switch { RegexCharClass.AnyClass => "any character", RegexCharClass.DigitClass => "a Unicode digit", RegexCharClass.ECMADigitClass => "'0' through '9'", RegexCharClass.ECMASpaceClass => "a whitespace character (ECMA)", RegexCharClass.ECMAWordClass => "a word character (ECMA)", RegexCharClass.NotDigitClass => "any character other than a Unicode digit", RegexCharClass.NotECMADigitClass => "any character other than '0' through '9'", RegexCharClass.NotECMASpaceClass => "any character other than a space character (ECMA)", RegexCharClass.NotECMAWordClass => "any character other than a word character (ECMA)", RegexCharClass.NotSpaceClass => "any character other than a space character", RegexCharClass.NotWordClass => "any character other than a word character", RegexCharClass.SpaceClass => "a whitespace character", RegexCharClass.WordClass => "a word character", _ => $"a character in the set {RegexCharClass.DescribeSet(charClass)}", }; /// <summary>Writes a textual description of the node tree fit for rending in source.</summary> /// <param name="writer">The writer to which the description should be written.</param> /// <param name="node">The node being written.</param> /// <param name="prefix">The prefix to write at the beginning of every line, including a "//" for a comment.</param> /// <param name="analyses">Analysis of the tree</param> /// <param name="depth">The depth of the current node.</param> private static void DescribeExpression(TextWriter writer, RegexNode node, string prefix, AnalysisResults analysis, int depth = 0) { bool skip = node.Kind switch { // For concatenations, flatten the contents into the parent, but only if the parent isn't a form of alternation, // where each branch is considered to be independent rather than a concatenation. RegexNodeKind.Concatenate when node.Parent is not { Kind: RegexNodeKind.Alternate or RegexNodeKind.BackreferenceConditional or RegexNodeKind.ExpressionConditional } => true, // For atomic, skip the node if we'll instead render the atomic label as part of rendering the child. RegexNodeKind.Atomic when node.Child(0).Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop or RegexNodeKind.Alternate => true, // Don't skip anything else. _ => false, }; if (!skip) { string tag = node.Parent?.Kind switch { RegexNodeKind.ExpressionConditional when node.Parent.Child(0) == node => "Condition: ", RegexNodeKind.ExpressionConditional when node.Parent.Child(1) == node => "Matched: ", RegexNodeKind.ExpressionConditional when node.Parent.Child(2) == node => "Not Matched: ", RegexNodeKind.BackreferenceConditional when node.Parent.Child(0) == node => "Matched: ", RegexNodeKind.BackreferenceConditional when node.Parent.Child(1) == node => "Not Matched: ", _ => "", }; // Write out the line for the node. const char BulletPoint = '\u25CB'; writer.WriteLine($"{prefix}{new string(' ', depth * 4)}{BulletPoint} {tag}{DescribeNode(node, analysis)}"); } // Recur into each of its children. int childCount = node.ChildCount(); for (int i = 0; i < childCount; i++) { int childDepth = skip ? 
depth : depth + 1; DescribeExpression(writer, node.Child(i), prefix, analysis, childDepth); } } /// <summary>Gets a textual description of a loop's style and bounds.</summary> private static string DescribeLoop(RegexNode node, AnalysisResults analysis) { string style = node.Kind switch { _ when node.M == node.N => "exactly", RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloopatomic => "atomically", RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop => "greedily", RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy => "lazily", RegexNodeKind.Loop => analysis.IsAtomicByAncestor(node) ? "greedily and atomically" : "greedily", _ /* RegexNodeKind.Lazyloop */ => analysis.IsAtomicByAncestor(node) ? "lazily and atomically" : "lazily", }; string bounds = node.M == node.N ? $" {node.M} times" : (node.M, node.N) switch { (0, int.MaxValue) => " any number of times", (1, int.MaxValue) => " at least once", (2, int.MaxValue) => " at least twice", (_, int.MaxValue) => $" at least {node.M} times", (0, 1) => ", optionally", (0, _) => $" at most {node.N} times", _ => $" at least {node.M} and at most {node.N} times" }; return style + bounds; } private static FinishEmitScope EmitScope(IndentedTextWriter writer, string title, bool faux = false) => EmitBlock(writer, $"// {title}", faux: faux); private static FinishEmitScope EmitBlock(IndentedTextWriter writer, string? clause, bool faux = false) { if (clause is not null) { writer.WriteLine(clause); } writer.WriteLine(faux ? "//{" : "{"); writer.Indent++; return new FinishEmitScope(writer, faux); } private static void EmitAdd(IndentedTextWriter writer, string variable, int value) { if (value == 0) { return; } writer.WriteLine( value == 1 ? $"{variable}++;" : value == -1 ? $"{variable}--;" : value > 0 ? $"{variable} += {value};" : value < 0 && value > int.MinValue ? $"{variable} -= {-value};" : $"{variable} += {value.ToString(CultureInfo.InvariantCulture)};"); } private readonly struct FinishEmitScope : IDisposable { private readonly IndentedTextWriter _writer; private readonly bool _faux; public FinishEmitScope(IndentedTextWriter writer, bool faux) { _writer = writer; _faux = faux; } public void Dispose() { if (_writer is not null) { _writer.Indent--; _writer.WriteLine(_faux ? "//}" : "}"); } } } /// <summary>Bit flags indicating which additional helpers should be emitted into the regex class.</summary> [Flags] private enum RequiredHelperFunctions { /// <summary>No additional functions are required.</summary> None = 0b0, /// <summary>The IsWordChar helper is required.</summary> IsWordChar = 0b1, /// <summary>The IsBoundary helper is required.</summary> IsBoundary = 0b10, /// <summary>The IsECMABoundary helper is required.</summary> IsECMABoundary = 0b100 } } }
1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later, when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known not to backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler's / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that would then enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated, but the parent will rightly assume there wasn't any and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer-loop tests, which source-generate our full regex corpus, caught a case where this was happening. A couple of fixes, either of which on its own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, the source generator consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
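A minimal, self-contained C# sketch of the idea behind fix 1, assuming hypothetical Node/Analysis/Emitter stand-ins rather than the actual RegexNode/RegexCompiler/RegexGenerator APIs: the emitter consults one shared atomicity analysis before deciding whether to emit backtracking support, so its view cannot drift from its parent's view of the same node.

using System;
using System.Collections.Generic;

// Hypothetical, simplified stand-ins for the real regex node tree (sketch only).
enum NodeKind { OneLoop, Atomic }

sealed class Node
{
    public NodeKind Kind;
    public List<Node> Children = new();
}

// One analysis that both a node's own emission and its parent consult,
// so the two can never disagree about whether backtracking code exists.
sealed class Analysis
{
    private readonly HashSet<Node> _atomicByAncestor = new();

    public Analysis(Node root) => Visit(root, ancestorIsAtomic: false);

    private void Visit(Node node, bool ancestorIsAtomic)
    {
        if (ancestorIsAtomic)
        {
            _atomicByAncestor.Add(node);
        }

        bool childrenAtomic = ancestorIsAtomic || node.Kind == NodeKind.Atomic;
        foreach (Node child in node.Children)
        {
            Visit(child, childrenAtomic);
        }
    }

    public bool IsAtomicByAncestor(Node node) => _atomicByAncestor.Contains(node);
}

static class Emitter
{
    public static void EmitSingleCharLoop(Node node, Analysis analysis)
    {
        // The spirit of fix 1: even if the optimizer never rewrote this node into an
        // atomic loop, emit the atomic (non-backtracking) form whenever the shared
        // analysis says no backtracking into it can ever be observed.
        if (analysis.IsAtomicByAncestor(node))
        {
            Console.WriteLine("// emit atomic (non-backtracking) single-char loop");
            return;
        }

        Console.WriteLine("// emit greedy single-char loop plus backtracking support");
    }
}

static class Program
{
    static void Main()
    {
        var loop = new Node { Kind = NodeKind.OneLoop };
        var atomic = new Node { Kind = NodeKind.Atomic, Children = { loop } };

        var analysis = new Analysis(atomic);
        Emitter.EmitSingleCharLoop(loop, analysis); // prints the atomic form
    }
}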
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later, when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known not to backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler's / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that would then enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated, but the parent will rightly assume there wasn't any and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer-loop tests, which source-generate our full regex corpus, caught a case where this was happening. A couple of fixes, either of which on its own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, the source generator consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
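A minimal C# sketch of the idea behind fix 2, again using invented TreeNode/TreeKind stand-ins rather than the real RegexNode reduction code: the final pass that walks the right-hand side of atomic constructs and makes whatever ends them atomic also descends into lookaround bodies, since nothing outside a lookaround backtracks into it once it has matched.

using System;
using System.Collections.Generic;

// Hypothetical, simplified node model for this sketch only.
enum TreeKind { Atomic, Concatenate, Loop, AtomicLoop, PositiveLookaround }

sealed class TreeNode
{
    public TreeKind Kind;
    public List<TreeNode> Children = new();
    public TreeNode(TreeKind kind) => Kind = kind;
}

static class FinalOptimizations
{
    // Walk the right-hand spine beneath an atomic construct: the last thing the
    // construct runs can never be backtracked into, so it can itself be made atomic.
    public static void EliminateEndingBacktracking(TreeNode node)
    {
        switch (node.Kind)
        {
            case TreeKind.Loop:
                // Nothing can follow it inside the atomic construct, so upgrade it.
                node.Kind = TreeKind.AtomicLoop;
                break;

            case TreeKind.Atomic:
            case TreeKind.Concatenate:
                if (node.Children.Count > 0)
                {
                    // Only the rightmost child ends the construct.
                    EliminateEndingBacktracking(node.Children[node.Children.Count - 1]);
                }
                break;

            case TreeKind.PositiveLookaround:
                // The point of fix 2: also recur into the lookaround's body, because
                // once the lookaround has matched, nothing outside it backtracks into it.
                if (node.Children.Count > 0)
                {
                    EliminateEndingBacktracking(node.Children[0]);
                }
                break;
        }
    }
}

static class Demo
{
    static void Main()
    {
        var loop = new TreeNode(TreeKind.Loop);
        var lookaround = new TreeNode(TreeKind.PositiveLookaround) { Children = { loop } };
        var atomic = new TreeNode(TreeKind.Atomic) { Children = { lookaround } };

        FinalOptimizations.EliminateEndingBacktracking(atomic);
        Console.WriteLine(loop.Kind); // AtomicLoop
    }
}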
./src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexCompiler.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.Reflection; using System.Reflection.Emit; using System.Runtime.InteropServices; using System.Threading; namespace System.Text.RegularExpressions { /// <summary> /// RegexCompiler translates a block of RegexCode to MSIL, and creates a subclass of the RegexRunner type. /// </summary> internal abstract class RegexCompiler { private static readonly FieldInfo s_runtextstartField = RegexRunnerField("runtextstart"); private static readonly FieldInfo s_runtextposField = RegexRunnerField("runtextpos"); private static readonly FieldInfo s_runstackField = RegexRunnerField("runstack"); private static readonly MethodInfo s_captureMethod = RegexRunnerMethod("Capture"); private static readonly MethodInfo s_transferCaptureMethod = RegexRunnerMethod("TransferCapture"); private static readonly MethodInfo s_uncaptureMethod = RegexRunnerMethod("Uncapture"); private static readonly MethodInfo s_isMatchedMethod = RegexRunnerMethod("IsMatched"); private static readonly MethodInfo s_matchLengthMethod = RegexRunnerMethod("MatchLength"); private static readonly MethodInfo s_matchIndexMethod = RegexRunnerMethod("MatchIndex"); private static readonly MethodInfo s_isBoundaryMethod = typeof(RegexRunner).GetMethod("IsBoundary", BindingFlags.NonPublic | BindingFlags.Instance, new[] { typeof(ReadOnlySpan<char>), typeof(int) })!; private static readonly MethodInfo s_isWordCharMethod = RegexRunnerMethod("IsWordChar"); private static readonly MethodInfo s_isECMABoundaryMethod = typeof(RegexRunner).GetMethod("IsECMABoundary", BindingFlags.NonPublic | BindingFlags.Instance, new[] { typeof(ReadOnlySpan<char>), typeof(int) })!; private static readonly MethodInfo s_crawlposMethod = RegexRunnerMethod("Crawlpos"); private static readonly MethodInfo s_charInClassMethod = RegexRunnerMethod("CharInClass"); private static readonly MethodInfo s_checkTimeoutMethod = RegexRunnerMethod("CheckTimeout"); private static readonly MethodInfo s_charIsDigitMethod = typeof(char).GetMethod("IsDigit", new Type[] { typeof(char) })!; private static readonly MethodInfo s_charIsWhiteSpaceMethod = typeof(char).GetMethod("IsWhiteSpace", new Type[] { typeof(char) })!; private static readonly MethodInfo s_charGetUnicodeInfo = typeof(char).GetMethod("GetUnicodeCategory", new Type[] { typeof(char) })!; private static readonly MethodInfo s_charToLowerInvariantMethod = typeof(char).GetMethod("ToLowerInvariant", new Type[] { typeof(char) })!; private static readonly MethodInfo s_cultureInfoGetCurrentCultureMethod = typeof(CultureInfo).GetMethod("get_CurrentCulture")!; private static readonly MethodInfo s_cultureInfoGetTextInfoMethod = typeof(CultureInfo).GetMethod("get_TextInfo")!; private static readonly MethodInfo s_spanGetItemMethod = typeof(ReadOnlySpan<char>).GetMethod("get_Item", new Type[] { typeof(int) })!; private static readonly MethodInfo s_spanGetLengthMethod = typeof(ReadOnlySpan<char>).GetMethod("get_Length")!; private static readonly MethodInfo s_memoryMarshalGetReference = typeof(MemoryMarshal).GetMethod("GetReference", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanIndexOfChar = typeof(MemoryExtensions).GetMethod("IndexOf", new Type[] { 
typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), Type.MakeGenericMethodParameter(0) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanIndexOfSpan = typeof(MemoryExtensions).GetMethod("IndexOf", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanIndexOfAnyCharChar = typeof(MemoryExtensions).GetMethod("IndexOfAny", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), Type.MakeGenericMethodParameter(0), Type.MakeGenericMethodParameter(0) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanIndexOfAnyCharCharChar = typeof(MemoryExtensions).GetMethod("IndexOfAny", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), Type.MakeGenericMethodParameter(0), Type.MakeGenericMethodParameter(0), Type.MakeGenericMethodParameter(0) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanIndexOfAnySpan = typeof(MemoryExtensions).GetMethod("IndexOfAny", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanLastIndexOfChar = typeof(MemoryExtensions).GetMethod("LastIndexOf", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), Type.MakeGenericMethodParameter(0) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanLastIndexOfAnyCharChar = typeof(MemoryExtensions).GetMethod("LastIndexOfAny", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), Type.MakeGenericMethodParameter(0), Type.MakeGenericMethodParameter(0) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanLastIndexOfAnyCharCharChar = typeof(MemoryExtensions).GetMethod("LastIndexOfAny", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), Type.MakeGenericMethodParameter(0), Type.MakeGenericMethodParameter(0), Type.MakeGenericMethodParameter(0) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanLastIndexOfAnySpan = typeof(MemoryExtensions).GetMethod("LastIndexOfAny", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanLastIndexOfSpan = typeof(MemoryExtensions).GetMethod("LastIndexOf", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanSliceIntMethod = typeof(ReadOnlySpan<char>).GetMethod("Slice", new Type[] { typeof(int) })!; private static readonly MethodInfo s_spanSliceIntIntMethod = typeof(ReadOnlySpan<char>).GetMethod("Slice", new Type[] { typeof(int), typeof(int) })!; private static readonly MethodInfo s_spanStartsWith = typeof(MemoryExtensions).GetMethod("StartsWith", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) 
})!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_stringAsSpanMethod = typeof(MemoryExtensions).GetMethod("AsSpan", new Type[] { typeof(string) })!; private static readonly MethodInfo s_stringGetCharsMethod = typeof(string).GetMethod("get_Chars", new Type[] { typeof(int) })!; private static readonly MethodInfo s_textInfoToLowerMethod = typeof(TextInfo).GetMethod("ToLower", new Type[] { typeof(char) })!; private static readonly MethodInfo s_arrayResize = typeof(Array).GetMethod("Resize")!.MakeGenericMethod(typeof(int)); private static readonly MethodInfo s_mathMinIntInt = typeof(Math).GetMethod("Min", new Type[] { typeof(int), typeof(int) })!; /// <summary>The ILGenerator currently in use.</summary> protected ILGenerator? _ilg; /// <summary>The options for the expression.</summary> protected RegexOptions _options; /// <summary>The <see cref="RegexTree"/> written for the expression.</summary> protected RegexTree? _regexTree; /// <summary>Whether this expression has a non-infinite timeout.</summary> protected bool _hasTimeout; /// <summary>Pool of Int32 LocalBuilders.</summary> private Stack<LocalBuilder>? _int32LocalsPool; /// <summary>Pool of ReadOnlySpan of char locals.</summary> private Stack<LocalBuilder>? _readOnlySpanCharLocalsPool; /// <summary>Local representing a cached TextInfo for the culture to use for all case-insensitive operations.</summary> private LocalBuilder? _textInfo; /// <summary>Local representing a timeout counter for loops (set loops and node loops).</summary> private LocalBuilder? _loopTimeoutCounter; /// <summary>A frequency with which the timeout should be validated.</summary> private const int LoopTimeoutCheckCount = 2048; private static FieldInfo RegexRunnerField(string fieldname) => typeof(RegexRunner).GetField(fieldname, BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance | BindingFlags.Static)!; private static MethodInfo RegexRunnerMethod(string methname) => typeof(RegexRunner).GetMethod(methname, BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance | BindingFlags.Static)!; /// <summary> /// Entry point to dynamically compile a regular expression. The expression is compiled to /// an in-memory assembly. /// </summary> internal static RegexRunnerFactory? 
Compile(string pattern, RegexTree regexTree, RegexOptions options, bool hasTimeout) => new RegexLWCGCompiler().FactoryInstanceFromCode(pattern, regexTree, options, hasTimeout); /// <summary>A macro for _ilg.DefineLabel</summary> private Label DefineLabel() => _ilg!.DefineLabel(); /// <summary>A macro for _ilg.MarkLabel</summary> private void MarkLabel(Label l) => _ilg!.MarkLabel(l); /// <summary>A macro for _ilg.Emit(Opcodes.Ldstr, str)</summary> protected void Ldstr(string str) => _ilg!.Emit(OpCodes.Ldstr, str); /// <summary>A macro for the various forms of Ldc.</summary> protected void Ldc(int i) => _ilg!.Emit(OpCodes.Ldc_I4, i); /// <summary>A macro for _ilg.Emit(OpCodes.Ldc_I8).</summary> protected void LdcI8(long i) => _ilg!.Emit(OpCodes.Ldc_I8, i); /// <summary>A macro for _ilg.Emit(OpCodes.Ret).</summary> protected void Ret() => _ilg!.Emit(OpCodes.Ret); /// <summary>A macro for _ilg.Emit(OpCodes.Dup).</summary> protected void Dup() => _ilg!.Emit(OpCodes.Dup); /// <summary>A macro for _ilg.Emit(OpCodes.Rem_Un).</summary> private void RemUn() => _ilg!.Emit(OpCodes.Rem_Un); /// <summary>A macro for _ilg.Emit(OpCodes.Ceq).</summary> private void Ceq() => _ilg!.Emit(OpCodes.Ceq); /// <summary>A macro for _ilg.Emit(OpCodes.Cgt_Un).</summary> private void CgtUn() => _ilg!.Emit(OpCodes.Cgt_Un); /// <summary>A macro for _ilg.Emit(OpCodes.Clt_Un).</summary> private void CltUn() => _ilg!.Emit(OpCodes.Clt_Un); /// <summary>A macro for _ilg.Emit(OpCodes.Pop).</summary> private void Pop() => _ilg!.Emit(OpCodes.Pop); /// <summary>A macro for _ilg.Emit(OpCodes.Add).</summary> private void Add() => _ilg!.Emit(OpCodes.Add); /// <summary>A macro for _ilg.Emit(OpCodes.Sub).</summary> private void Sub() => _ilg!.Emit(OpCodes.Sub); /// <summary>A macro for _ilg.Emit(OpCodes.Mul).</summary> private void Mul() => _ilg!.Emit(OpCodes.Mul); /// <summary>A macro for _ilg.Emit(OpCodes.And).</summary> private void And() => _ilg!.Emit(OpCodes.And); /// <summary>A macro for _ilg.Emit(OpCodes.Or).</summary> private void Or() => _ilg!.Emit(OpCodes.Or); /// <summary>A macro for _ilg.Emit(OpCodes.Shl).</summary> private void Shl() => _ilg!.Emit(OpCodes.Shl); /// <summary>A macro for _ilg.Emit(OpCodes.Shr).</summary> private void Shr() => _ilg!.Emit(OpCodes.Shr); /// <summary>A macro for _ilg.Emit(OpCodes.Ldloc).</summary> /// <remarks>ILGenerator will switch to the optimal form based on the local's index.</remarks> private void Ldloc(LocalBuilder lt) => _ilg!.Emit(OpCodes.Ldloc, lt); /// <summary>A macro for _ilg.Emit(OpCodes.Ldloca).</summary> /// <remarks>ILGenerator will switch to the optimal form based on the local's index.</remarks> private void Ldloca(LocalBuilder lt) => _ilg!.Emit(OpCodes.Ldloca, lt); /// <summary>A macro for _ilg.Emit(OpCodes.Ldind_U2).</summary> private void LdindU2() => _ilg!.Emit(OpCodes.Ldind_U2); /// <summary>A macro for _ilg.Emit(OpCodes.Ldind_I4).</summary> private void LdindI4() => _ilg!.Emit(OpCodes.Ldind_I4); /// <summary>A macro for _ilg.Emit(OpCodes.Ldind_I8).</summary> private void LdindI8() => _ilg!.Emit(OpCodes.Ldind_I8); /// <summary>A macro for _ilg.Emit(OpCodes.Unaligned).</summary> private void Unaligned(byte alignment) => _ilg!.Emit(OpCodes.Unaligned, alignment); /// <summary>A macro for _ilg.Emit(OpCodes.Stloc).</summary> /// <remarks>ILGenerator will switch to the optimal form based on the local's index.</remarks> private void Stloc(LocalBuilder lt) => _ilg!.Emit(OpCodes.Stloc, lt); /// <summary>A macro for _ilg.Emit(OpCodes.Ldarg_0).</summary> protected void Ldthis() => 
_ilg!.Emit(OpCodes.Ldarg_0); /// <summary>A macro for _ilg.Emit(OpCodes.Ldarg_1).</summary> private void Ldarg_1() => _ilg!.Emit(OpCodes.Ldarg_1); /// <summary>A macro for Ldthis(); Ldfld();</summary> protected void Ldthisfld(FieldInfo ft) { Ldthis(); _ilg!.Emit(OpCodes.Ldfld, ft); } /// <summary>Fetches the address of the argument at the passed-in <paramref name="position"/>.</summary> /// <param name="position">The position of the argument whose address needs to be fetched.</param> private void Ldarga_s(int position) => _ilg!.Emit(OpCodes.Ldarga_S, position); /// <summary>A macro for Ldthis(); Ldfld(); Stloc();</summary> private void Mvfldloc(FieldInfo ft, LocalBuilder lt) { Ldthisfld(ft); Stloc(lt); } /// <summary>A macro for _ilg.Emit(OpCodes.Stfld).</summary> protected void Stfld(FieldInfo ft) => _ilg!.Emit(OpCodes.Stfld, ft); /// <summary>A macro for _ilg.Emit(OpCodes.Callvirt, mt).</summary> protected void Callvirt(MethodInfo mt) => _ilg!.Emit(OpCodes.Callvirt, mt); /// <summary>A macro for _ilg.Emit(OpCodes.Call, mt).</summary> protected void Call(MethodInfo mt) => _ilg!.Emit(OpCodes.Call, mt); /// <summary>A macro for _ilg.Emit(OpCodes.Brfalse) (long form).</summary> private void BrfalseFar(Label l) => _ilg!.Emit(OpCodes.Brfalse, l); /// <summary>A macro for _ilg.Emit(OpCodes.Brtrue) (long form).</summary> private void BrtrueFar(Label l) => _ilg!.Emit(OpCodes.Brtrue, l); /// <summary>A macro for _ilg.Emit(OpCodes.Br) (long form).</summary> private void BrFar(Label l) => _ilg!.Emit(OpCodes.Br, l); /// <summary>A macro for _ilg.Emit(OpCodes.Ble) (long form).</summary> private void BleFar(Label l) => _ilg!.Emit(OpCodes.Ble, l); /// <summary>A macro for _ilg.Emit(OpCodes.Blt) (long form).</summary> private void BltFar(Label l) => _ilg!.Emit(OpCodes.Blt, l); /// <summary>A macro for _ilg.Emit(OpCodes.Blt_Un) (long form).</summary> private void BltUnFar(Label l) => _ilg!.Emit(OpCodes.Blt_Un, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bge) (long form).</summary> private void BgeFar(Label l) => _ilg!.Emit(OpCodes.Bge, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bge_Un) (long form).</summary> private void BgeUnFar(Label l) => _ilg!.Emit(OpCodes.Bge_Un, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bne_Un) (long form).</summary> private void BneFar(Label l) => _ilg!.Emit(OpCodes.Bne_Un, l); /// <summary>A macro for _ilg.Emit(OpCodes.Beq) (long form).</summary> private void BeqFar(Label l) => _ilg!.Emit(OpCodes.Beq, l); /// <summary>A macro for _ilg.Emit(OpCodes.Brtrue_S) (short jump).</summary> private void Brtrue(Label l) => _ilg!.Emit(OpCodes.Brtrue_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Br_S) (short jump).</summary> private void Br(Label l) => _ilg!.Emit(OpCodes.Br_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Ble_S) (short jump).</summary> private void Ble(Label l) => _ilg!.Emit(OpCodes.Ble_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Blt_S) (short jump).</summary> private void Blt(Label l) => _ilg!.Emit(OpCodes.Blt_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bge_S) (short jump).</summary> private void Bge(Label l) => _ilg!.Emit(OpCodes.Bge_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bge_Un_S) (short jump).</summary> private void BgeUn(Label l) => _ilg!.Emit(OpCodes.Bge_Un_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bgt_S) (short jump).</summary> private void Bgt(Label l) => _ilg!.Emit(OpCodes.Bgt_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bne_Un_S) (short jump).</summary> private void Bne(Label l) => _ilg!.Emit(OpCodes.Bne_Un_S, l); /// <summary>A 
macro for _ilg.Emit(OpCodes.Beq_S) (short jump).</summary> private void Beq(Label l) => _ilg!.Emit(OpCodes.Beq_S, l); /// <summary>A macro for the Ldlen instruction.</summary> private void Ldlen() => _ilg!.Emit(OpCodes.Ldlen); /// <summary>A macro for the Ldelem_I4 instruction.</summary> private void LdelemI4() => _ilg!.Emit(OpCodes.Ldelem_I4); /// <summary>A macro for the Stelem_I4 instruction.</summary> private void StelemI4() => _ilg!.Emit(OpCodes.Stelem_I4); private void Switch(Label[] table) => _ilg!.Emit(OpCodes.Switch, table); /// <summary>Declares a local bool.</summary> private LocalBuilder DeclareBool() => _ilg!.DeclareLocal(typeof(bool)); /// <summary>Declares a local int.</summary> private LocalBuilder DeclareInt32() => _ilg!.DeclareLocal(typeof(int)); /// <summary>Declares a local TextInfo.</summary> private LocalBuilder? DeclareTextInfo() => _ilg!.DeclareLocal(typeof(TextInfo)); /// <summary>Declares a local string.</summary> private LocalBuilder DeclareString() => _ilg!.DeclareLocal(typeof(string)); private LocalBuilder DeclareReadOnlySpanChar() => _ilg!.DeclareLocal(typeof(ReadOnlySpan<char>)); /// <summary>Rents an Int32 local variable slot from the pool of locals.</summary> /// <remarks> /// Care must be taken to Dispose of the returned <see cref="RentedLocalBuilder"/> when it's no longer needed, /// and also not to jump into the middle of a block involving a rented local from outside of that block. /// </remarks> private RentedLocalBuilder RentInt32Local() => new RentedLocalBuilder( _int32LocalsPool ??= new Stack<LocalBuilder>(), _int32LocalsPool.TryPop(out LocalBuilder? iterationLocal) ? iterationLocal : DeclareInt32()); /// <summary>Rents a ReadOnlySpan(char) local variable slot from the pool of locals.</summary> /// <remarks> /// Care must be taken to Dispose of the returned <see cref="RentedLocalBuilder"/> when it's no longer needed, /// and also not to jump into the middle of a block involving a rented local from outside of that block. /// </remarks> private RentedLocalBuilder RentReadOnlySpanCharLocal() => new RentedLocalBuilder( _readOnlySpanCharLocalsPool ??= new Stack<LocalBuilder>(1), // capacity == 1 as we currently don't expect overlapping instances _readOnlySpanCharLocalsPool.TryPop(out LocalBuilder? iterationLocal) ?
iterationLocal : DeclareReadOnlySpanChar()); /// <summary>Returned a rented local to the pool.</summary> private struct RentedLocalBuilder : IDisposable { private readonly Stack<LocalBuilder> _pool; private readonly LocalBuilder _local; internal RentedLocalBuilder(Stack<LocalBuilder> pool, LocalBuilder local) { _local = local; _pool = pool; } public static implicit operator LocalBuilder(RentedLocalBuilder local) => local._local; public void Dispose() { Debug.Assert(_pool != null); Debug.Assert(_local != null); Debug.Assert(!_pool.Contains(_local)); _pool.Push(_local); this = default; } } /// <summary>Sets the culture local to CultureInfo.CurrentCulture.</summary> private void InitLocalCultureInfo() { Debug.Assert(_textInfo != null); Call(s_cultureInfoGetCurrentCultureMethod); Callvirt(s_cultureInfoGetTextInfoMethod); Stloc(_textInfo); } /// <summary>Whether ToLower operations should be performed with the invariant culture as opposed to the one in <see cref="_textInfo"/>.</summary> private bool UseToLowerInvariant => _textInfo == null || (_options & RegexOptions.CultureInvariant) != 0; /// <summary>Invokes either char.ToLowerInvariant(c) or _textInfo.ToLower(c).</summary> private void CallToLower() { if (UseToLowerInvariant) { Call(s_charToLowerInvariantMethod); } else { using RentedLocalBuilder currentCharLocal = RentInt32Local(); Stloc(currentCharLocal); Ldloc(_textInfo!); Ldloc(currentCharLocal); Callvirt(s_textInfoToLowerMethod); } } /// <summary>Generates the implementation for TryFindNextPossibleStartingPosition.</summary> protected void EmitTryFindNextPossibleStartingPosition() { Debug.Assert(_regexTree != null); _int32LocalsPool?.Clear(); _readOnlySpanCharLocalsPool?.Clear(); LocalBuilder inputSpan = DeclareReadOnlySpanChar(); LocalBuilder pos = DeclareInt32(); _textInfo = null; if ((_options & RegexOptions.CultureInvariant) == 0) { bool needsCulture = _regexTree.FindOptimizations.FindMode switch { FindNextStartingPositionMode.FixedLiteral_LeftToRight_CaseInsensitive or FindNextStartingPositionMode.FixedSets_LeftToRight_CaseInsensitive or FindNextStartingPositionMode.LeadingSet_LeftToRight_CaseInsensitive => true, _ when _regexTree.FindOptimizations.FixedDistanceSets is List<(char[]? Chars, string Set, int Distance, bool CaseInsensitive)> sets => sets.Exists(set => set.CaseInsensitive), _ => false, }; if (needsCulture) { _textInfo = DeclareTextInfo(); InitLocalCultureInfo(); } } // Load necessary locals // int pos = base.runtextpos; // ReadOnlySpan<char> inputSpan = dynamicMethodArg; // TODO: We can reference the arg directly rather than using another local. Mvfldloc(s_runtextposField, pos); Ldarg_1(); Stloc(inputSpan); // Generate length check. If the input isn't long enough to possibly match, fail quickly. // It's rare for min required length to be 0, so we don't bother special-casing the check, // especially since we want the "return false" code regardless. 
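// Note: on failure the code below stores inputSpan.Length into base.runtextpos before returning false, so the caller's scan loop won't keep bumping through positions that can't possibly match.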
int minRequiredLength = _regexTree.FindOptimizations.MinRequiredLength; Debug.Assert(minRequiredLength >= 0); Label returnFalse = DefineLabel(); Label finishedLengthCheck = DefineLabel(); // if (pos > inputSpan.Length - _code.Tree.MinRequiredLength) // { // base.runtextpos = inputSpan.Length; // return false; // } Ldloc(pos); Ldloca(inputSpan); Call(s_spanGetLengthMethod); if (minRequiredLength > 0) { Ldc(minRequiredLength); Sub(); } Ble(finishedLengthCheck); MarkLabel(returnFalse); Ldthis(); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Stfld(s_runtextposField); Ldc(0); Ret(); MarkLabel(finishedLengthCheck); // Emit any anchors. if (GenerateAnchors()) { return; } // Either anchors weren't specified, or they don't completely root all matches to a specific location. switch (_regexTree.FindOptimizations.FindMode) { case FindNextStartingPositionMode.LeadingPrefix_LeftToRight_CaseSensitive: Debug.Assert(!string.IsNullOrEmpty(_regexTree.FindOptimizations.LeadingCaseSensitivePrefix)); EmitIndexOf_LeftToRight(_regexTree.FindOptimizations.LeadingCaseSensitivePrefix); break; case FindNextStartingPositionMode.LeadingSet_LeftToRight_CaseSensitive: case FindNextStartingPositionMode.LeadingSet_LeftToRight_CaseInsensitive: case FindNextStartingPositionMode.FixedSets_LeftToRight_CaseSensitive: case FindNextStartingPositionMode.FixedSets_LeftToRight_CaseInsensitive: Debug.Assert(_regexTree.FindOptimizations.FixedDistanceSets is { Count: > 0 }); EmitFixedSet_LeftToRight(); break; case FindNextStartingPositionMode.LiteralAfterLoop_LeftToRight_CaseSensitive: Debug.Assert(_regexTree.FindOptimizations.LiteralAfterLoop is not null); EmitLiteralAfterAtomicLoop(); break; default: Debug.Fail($"Unexpected mode: {_regexTree.FindOptimizations.FindMode}"); goto case FindNextStartingPositionMode.NoSearch; case FindNextStartingPositionMode.NoSearch: // return true; Ldc(1); Ret(); break; } // Emits any anchors. Returns true if the anchor roots any match to a specific location and thus no further // searching is required; otherwise, false. bool GenerateAnchors() { Label label; // Anchors that fully implement TryFindNextPossibleStartingPosition, with a check that leads to immediate success or failure determination. switch (_regexTree.FindOptimizations.FindMode) { case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_Beginning: label = DefineLabel(); Ldloc(pos); Ldc(0); Ble(label); Br(returnFalse); MarkLabel(label); Ldc(1); Ret(); return true; case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_Start: label = DefineLabel(); Ldloc(pos); Ldthisfld(s_runtextstartField); Ble(label); Br(returnFalse); MarkLabel(label); Ldc(1); Ret(); return true; case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_EndZ: label = DefineLabel(); Ldloc(pos); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldc(1); Sub(); Bge(label); Ldthis(); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldc(1); Sub(); Stfld(s_runtextposField); MarkLabel(label); Ldc(1); Ret(); return true; case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_End: label = DefineLabel(); Ldloc(pos); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Bge(label); Ldthis(); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Stfld(s_runtextposField); MarkLabel(label); Ldc(1); Ret(); return true; case FindNextStartingPositionMode.TrailingAnchor_FixedLength_LeftToRight_End: case FindNextStartingPositionMode.TrailingAnchor_FixedLength_LeftToRight_EndZ: // Jump to the end, minus the min required length, which in this case is actually the fixed length. 
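// For EndZ the anchor may be followed by a single trailing '\n', so the target position allows one extra character (the extraNewlineBump below).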
{ int extraNewlineBump = _regexTree.FindOptimizations.FindMode == FindNextStartingPositionMode.TrailingAnchor_FixedLength_LeftToRight_EndZ ? 1 : 0; label = DefineLabel(); Ldloc(pos); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldc(_regexTree.FindOptimizations.MinRequiredLength + extraNewlineBump); Sub(); Bge(label); Ldthis(); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldc(_regexTree.FindOptimizations.MinRequiredLength + extraNewlineBump); Sub(); Stfld(s_runtextposField); MarkLabel(label); Ldc(1); Ret(); return true; } } // Now handle anchors that boost the position but don't determine immediate success or failure. switch (_regexTree.FindOptimizations.LeadingAnchor) { case RegexNodeKind.Bol: { // Optimize the handling of a Beginning-Of-Line (BOL) anchor. BOL is special, in that unlike // other anchors like Beginning, there are potentially multiple places a BOL can match. So unlike // the other anchors, which all skip all subsequent processing if found, with BOL we just use it // to boost our position to the next line, and then continue normally with any prefix or char class searches. label = DefineLabel(); // if (pos > 0... Ldloc(pos!); Ldc(0); Ble(label); // ... && inputSpan[pos - 1] != '\n') { ... } Ldloca(inputSpan); Ldloc(pos); Ldc(1); Sub(); Call(s_spanGetItemMethod); LdindU2(); Ldc('\n'); Beq(label); // int tmp = inputSpan.Slice(pos).IndexOf('\n'); Ldloca(inputSpan); Ldloc(pos); Call(s_spanSliceIntMethod); Ldc('\n'); Call(s_spanIndexOfChar); using (RentedLocalBuilder newlinePos = RentInt32Local()) { Stloc(newlinePos); // if (newlinePos < 0 || newlinePos + pos + 1 > inputSpan.Length) // { // base.runtextpos = inputSpan.Length; // return false; // } Ldloc(newlinePos); Ldc(0); Blt(returnFalse); Ldloc(newlinePos); Ldloc(pos); Add(); Ldc(1); Add(); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Bgt(returnFalse); // pos += newlinePos + 1; Ldloc(pos); Ldloc(newlinePos); Add(); Ldc(1); Add(); Stloc(pos); } MarkLabel(label); } break; } switch (_regexTree.FindOptimizations.TrailingAnchor) { case RegexNodeKind.End or RegexNodeKind.EndZ when _regexTree.FindOptimizations.MaxPossibleLength is int maxLength: // Jump to the end, minus the max allowed length. { int extraNewlineBump = _regexTree.FindOptimizations.FindMode == FindNextStartingPositionMode.TrailingAnchor_FixedLength_LeftToRight_EndZ ? 1 : 0; label = DefineLabel(); Ldloc(pos); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldc(maxLength + extraNewlineBump); Sub(); Bge(label); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldc(maxLength + extraNewlineBump); Sub(); Stloc(pos); MarkLabel(label); break; } } return false; } void EmitIndexOf_LeftToRight(string prefix) { using RentedLocalBuilder i = RentInt32Local(); // int i = inputSpan.Slice(pos).IndexOf(prefix); Ldloca(inputSpan); Ldloc(pos); Call(s_spanSliceIntMethod); Ldstr(prefix); Call(s_stringAsSpanMethod); Call(s_spanIndexOfSpan); Stloc(i); // if (i < 0) goto ReturnFalse; Ldloc(i); Ldc(0); BltFar(returnFalse); // base.runtextpos = pos + i; // return true; Ldthis(); Ldloc(pos); Ldloc(i); Add(); Stfld(s_runtextposField); Ldc(1); Ret(); } void EmitFixedSet_LeftToRight() { List<(char[]? Chars, string Set, int Distance, bool CaseInsensitive)>? sets = _regexTree.FindOptimizations.FixedDistanceSets; (char[]? 
Chars, string Set, int Distance, bool CaseInsensitive) primarySet = sets![0]; const int MaxSets = 4; int setsToUse = Math.Min(sets.Count, MaxSets); using RentedLocalBuilder iLocal = RentInt32Local(); using RentedLocalBuilder textSpanLocal = RentReadOnlySpanCharLocal(); // ReadOnlySpan<char> span = inputSpan.Slice(pos); Ldloca(inputSpan); Ldloc(pos); Call(s_spanSliceIntMethod); Stloc(textSpanLocal); // If we can use IndexOf{Any}, try to accelerate the skip loop via vectorization to match the first prefix. // We can use it if this is a case-sensitive class with a small number of characters in the class. int setIndex = 0; bool canUseIndexOf = !primarySet.CaseInsensitive && primarySet.Chars is not null; bool needLoop = !canUseIndexOf || setsToUse > 1; Label checkSpanLengthLabel = default; Label charNotInClassLabel = default; Label loopBody = default; if (needLoop) { checkSpanLengthLabel = DefineLabel(); charNotInClassLabel = DefineLabel(); loopBody = DefineLabel(); // for (int i = 0; Ldc(0); Stloc(iLocal); BrFar(checkSpanLengthLabel); MarkLabel(loopBody); } if (canUseIndexOf) { setIndex = 1; if (needLoop) { // slice.Slice(iLocal + primarySet.Distance); Ldloca(textSpanLocal); Ldloc(iLocal); if (primarySet.Distance != 0) { Ldc(primarySet.Distance); Add(); } Call(s_spanSliceIntMethod); } else if (primarySet.Distance != 0) { // slice.Slice(primarySet.Distance) Ldloca(textSpanLocal); Ldc(primarySet.Distance); Call(s_spanSliceIntMethod); } else { // slice Ldloc(textSpanLocal); } switch (primarySet.Chars!.Length) { case 1: // tmp = ...IndexOf(setChars[0]); Ldc(primarySet.Chars[0]); Call(s_spanIndexOfChar); break; case 2: // tmp = ...IndexOfAny(setChars[0], setChars[1]); Ldc(primarySet.Chars[0]); Ldc(primarySet.Chars[1]); Call(s_spanIndexOfAnyCharChar); break; case 3: // tmp = ...IndexOfAny(setChars[0], setChars[1], setChars[2]}); Ldc(primarySet.Chars[0]); Ldc(primarySet.Chars[1]); Ldc(primarySet.Chars[2]); Call(s_spanIndexOfAnyCharCharChar); break; default: Ldstr(new string(primarySet.Chars)); Call(s_stringAsSpanMethod); Call(s_spanIndexOfAnySpan); break; } if (needLoop) { // i += tmp; // if (tmp < 0) goto returnFalse; using (RentedLocalBuilder tmp = RentInt32Local()) { Stloc(tmp); Ldloc(iLocal); Ldloc(tmp); Add(); Stloc(iLocal); Ldloc(tmp); Ldc(0); BltFar(returnFalse); } } else { // i = tmp; // if (i < 0) goto returnFalse; Stloc(iLocal); Ldloc(iLocal); Ldc(0); BltFar(returnFalse); } // if (i >= slice.Length - (minRequiredLength - 1)) goto returnFalse; if (sets.Count > 1) { Debug.Assert(needLoop); Ldloca(textSpanLocal); Call(s_spanGetLengthMethod); Ldc(minRequiredLength - 1); Sub(); Ldloc(iLocal); BleFar(returnFalse); } } // if (!CharInClass(slice[i], prefix[0], "...")) continue; // if (!CharInClass(slice[i + 1], prefix[1], "...")) continue; // if (!CharInClass(slice[i + 2], prefix[2], "...")) continue; // ... 
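// Each remaining set is checked in order; any check that fails jumps to charNotInClassLabel, which advances i and continues the scan loop.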
Debug.Assert(setIndex is 0 or 1); for ( ; setIndex < sets.Count; setIndex++) { Debug.Assert(needLoop); Ldloca(textSpanLocal); Ldloc(iLocal); if (sets[setIndex].Distance != 0) { Ldc(sets[setIndex].Distance); Add(); } Call(s_spanGetItemMethod); LdindU2(); EmitMatchCharacterClass(sets[setIndex].Set, sets[setIndex].CaseInsensitive); BrfalseFar(charNotInClassLabel); } // base.runtextpos = pos + i; // return true; Ldthis(); Ldloc(pos); Ldloc(iLocal); Add(); Stfld(s_runtextposField); Ldc(1); Ret(); if (needLoop) { MarkLabel(charNotInClassLabel); // for (...; ...; i++) Ldloc(iLocal); Ldc(1); Add(); Stloc(iLocal); // for (...; i < span.Length - (minRequiredLength - 1); ...); MarkLabel(checkSpanLengthLabel); Ldloc(iLocal); Ldloca(textSpanLocal); Call(s_spanGetLengthMethod); if (setsToUse > 1 || primarySet.Distance != 0) { Ldc(minRequiredLength - 1); Sub(); } BltFar(loopBody); // base.runtextpos = inputSpan.Length; // return false; BrFar(returnFalse); } } // Emits a search for a literal following a leading atomic single-character loop. void EmitLiteralAfterAtomicLoop() { Debug.Assert(_regexTree.FindOptimizations.LiteralAfterLoop is not null); (RegexNode LoopNode, (char Char, string? String, char[]? Chars) Literal) target = _regexTree.FindOptimizations.LiteralAfterLoop.Value; Debug.Assert(target.LoopNode.Kind is RegexNodeKind.Setloop or RegexNodeKind.Setlazy or RegexNodeKind.Setloopatomic); Debug.Assert(target.LoopNode.N == int.MaxValue); // while (true) Label loopBody = DefineLabel(); Label loopEnd = DefineLabel(); MarkLabel(loopBody); // ReadOnlySpan<char> slice = inputSpan.Slice(pos); using RentedLocalBuilder slice = RentReadOnlySpanCharLocal(); Ldloca(inputSpan); Ldloc(pos); Call(s_spanSliceIntMethod); Stloc(slice); // Find the literal. If we can't find it, we're done searching. // int i = slice.IndexOf(literal); // if (i < 0) break; using RentedLocalBuilder i = RentInt32Local(); Ldloc(slice); if (target.Literal.String is string literalString) { Ldstr(literalString); Call(s_stringAsSpanMethod); Call(s_spanIndexOfSpan); } else if (target.Literal.Chars is not char[] literalChars) { Ldc(target.Literal.Char); Call(s_spanIndexOfChar); } else { switch (literalChars.Length) { case 2: Ldc(literalChars[0]); Ldc(literalChars[1]); Call(s_spanIndexOfAnyCharChar); break; case 3: Ldc(literalChars[0]); Ldc(literalChars[1]); Ldc(literalChars[2]); Call(s_spanIndexOfAnyCharCharChar); break; default: Ldstr(new string(literalChars)); Call(s_stringAsSpanMethod); Call(s_spanIndexOfAnySpan); break; } } Stloc(i); Ldloc(i); Ldc(0); BltFar(loopEnd); // We found the literal. Walk backwards from it finding as many matches as we can against the loop. // int prev = i; using RentedLocalBuilder prev = RentInt32Local(); Ldloc(i); Stloc(prev); // while ((uint)--prev < (uint)slice.Length) && MatchCharClass(slice[prev])); Label innerLoopBody = DefineLabel(); Label innerLoopEnd = DefineLabel(); MarkLabel(innerLoopBody); Ldloc(prev); Ldc(1); Sub(); Stloc(prev); Ldloc(prev); Ldloca(slice); Call(s_spanGetLengthMethod); BgeUn(innerLoopEnd); Ldloca(slice); Ldloc(prev); Call(s_spanGetItemMethod); LdindU2(); EmitMatchCharacterClass(target.LoopNode.Str!, caseInsensitive: false); BrtrueFar(innerLoopBody); MarkLabel(innerLoopEnd); if (target.LoopNode.M > 0) { // If we found fewer than needed, loop around to try again. The loop doesn't overlap with the literal, // so we can start from after the last place the literal matched. 
// if ((i - prev - 1) < target.LoopNode.M) // { // pos += i + 1; // continue; // } Label metMinimum = DefineLabel(); Ldloc(i); Ldloc(prev); Sub(); Ldc(1); Sub(); Ldc(target.LoopNode.M); Bge(metMinimum); Ldloc(pos); Ldloc(i); Add(); Ldc(1); Add(); Stloc(pos); BrFar(loopBody); MarkLabel(metMinimum); } // We have a winner. The starting position is just after the last position that failed to match the loop. // TODO: It'd be nice to be able to communicate i as a place the matching engine can start matching // after the loop, so that it doesn't need to re-match the loop. // base.runtextpos = pos + prev + 1; // return true; Ldthis(); Ldloc(pos); Ldloc(prev); Add(); Ldc(1); Add(); Stfld(s_runtextposField); Ldc(1); Ret(); // } MarkLabel(loopEnd); // base.runtextpos = inputSpan.Length; // return false; BrFar(returnFalse); } } /// <summary>Generates the implementation for TryMatchAtCurrentPosition.</summary> protected void EmitTryMatchAtCurrentPosition() { // In .NET Framework and up through .NET Core 3.1, the code generated for RegexOptions.Compiled was effectively an unrolled // version of what RegexInterpreter would process. The RegexNode tree would be turned into a series of opcodes via // RegexWriter; the interpreter would then sit in a loop processing those opcodes, and the RegexCompiler iterated through the // opcodes generating code for each, equivalent to what the interpreter would do albeit with some decisions made at compile-time // rather than at run-time. This approach, however, led to complicated code that wasn't pay-for-play (e.g. a big backtracking // jump table that all compilations went through even if there was no backtracking), that didn't factor in the shape of the // tree (e.g. it's difficult to add optimizations based on interactions between nodes in the graph), and that didn't read well // when decompiled from IL to C# or when directly emitted as C# as part of a source generator. // // This implementation is instead based on directly walking the RegexNode tree and outputting code for each node in the graph. // A dedicated function for each kind of RegexNode emits the code necessary to handle that node's processing, including recursively // calling the relevant function for any of its child nodes. Backtracking is handled not via a giant jump table, but instead // by emitting direct jumps to each backtracking construct. This is achieved by having all match failures jump to a "done" // label that can be changed by a previous emitter, e.g. before EmitLoop returns, it ensures that "doneLabel" is set to the // label that code should jump back to when backtracking. That way, a subsequent EmitXx function doesn't need to know exactly // where to jump: it simply always jumps to "doneLabel" on match failure, and "doneLabel" is always configured to point to // the right location. In an expression without backtracking, or before any backtracking constructs have been encountered, // "doneLabel" is simply the final return location from the TryMatchAtCurrentPosition method that will undo any captures and exit, signaling to // the calling scan loop that nothing was matched. Debug.Assert(_regexTree != null); _int32LocalsPool?.Clear(); _readOnlySpanCharLocalsPool?.Clear(); // Get the root Capture node of the tree. RegexNode node = _regexTree.Root; Debug.Assert(node.Kind == RegexNodeKind.Capture, "Every generated tree should begin with a capture node"); Debug.Assert(node.ChildCount() == 1, "Capture nodes should have one child"); // Skip the Capture node. We handle the implicit root capture specially.
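// The implicit group 0 capture is emitted at the end of this method via Capture(0, originalpos, pos), so only its child needs code here.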
node = node.Child(0); // In some limited cases, TryFindNextPossibleStartingPosition will only return true if it successfully matched the whole expression. // We can special case these to do essentially nothing in TryMatchAtCurrentPosition other than emit the capture. switch (node.Kind) { case RegexNodeKind.Multi or RegexNodeKind.Notone or RegexNodeKind.One or RegexNodeKind.Set when !IsCaseInsensitive(node): // This is the case for single and multiple characters, though the whole thing is only guaranteed // to have been validated in TryFindNextPossibleStartingPosition when doing case-sensitive comparison. // base.Capture(0, base.runtextpos, base.runtextpos + node.Str.Length); // base.runtextpos = base.runtextpos + node.Str.Length; // return true; Ldthis(); Dup(); Ldc(0); Ldthisfld(s_runtextposField); Dup(); Ldc(node.Kind == RegexNodeKind.Multi ? node.Str!.Length : 1); Add(); Call(s_captureMethod); Ldthisfld(s_runtextposField); Ldc(node.Kind == RegexNodeKind.Multi ? node.Str!.Length : 1); Add(); Stfld(s_runtextposField); Ldc(1); Ret(); return; // The source generator special-cases RegexNode.Empty, for purposes of code learning rather than // performance. Since that's not applicable to RegexCompiler, that code isn't mirrored here. } AnalysisResults analysis = RegexTreeAnalyzer.Analyze(_regexTree); // Initialize the main locals used throughout the implementation. LocalBuilder inputSpan = DeclareReadOnlySpanChar(); LocalBuilder originalPos = DeclareInt32(); LocalBuilder pos = DeclareInt32(); LocalBuilder slice = DeclareReadOnlySpanChar(); Label doneLabel = DefineLabel(); Label originalDoneLabel = doneLabel; if (_hasTimeout) { _loopTimeoutCounter = DeclareInt32(); } // CultureInfo culture = CultureInfo.CurrentCulture; // only if the whole expression or any subportion is ignoring case, and we're not using invariant InitializeCultureForTryMatchAtCurrentPositionIfNecessary(analysis); // ReadOnlySpan<char> inputSpan = input; Ldarg_1(); Stloc(inputSpan); // int pos = base.runtextpos; // int originalpos = pos; Ldthisfld(s_runtextposField); Stloc(pos); Ldloc(pos); Stloc(originalPos); // int stackpos = 0; LocalBuilder stackpos = DeclareInt32(); Ldc(0); Stloc(stackpos); // The implementation tries to use const indexes into the span wherever possible, which we can do // for all fixed-length constructs. In such cases (e.g. single chars, repeaters, strings, etc.) // we know at any point in the regex exactly how far into it we are, and we can use that to index // into the span created at the beginning of the routine to begin at exactly where we're starting // in the input. When we encounter a variable-length construct, we transfer the static value to // pos, slicing the inputSpan appropriately, and then zero out the static position. int sliceStaticPos = 0; SliceInputSpan(); // Check whether there are captures anywhere in the expression. If there isn't, we can skip all // the boilerplate logic around uncapturing, as there won't be anything to uncapture. bool expressionHasCaptures = analysis.MayContainCapture(node); // Emit the code for all nodes in the tree. EmitNode(node); // pos += sliceStaticPos; // base.runtextpos = pos; // Capture(0, originalpos, pos); // return true; Ldthis(); Ldloc(pos); if (sliceStaticPos > 0) { Ldc(sliceStaticPos); Add(); Stloc(pos); Ldloc(pos); } Stfld(s_runtextposField); Ldthis(); Ldc(0); Ldloc(originalPos); Ldloc(pos); Call(s_captureMethod); Ldc(1); Ret(); // NOTE: The following is a difference from the source generator. 
The source generator emits: // UncaptureUntil(0); // return false; // at every location where the all-up match is known to fail. In contrast, the compiler currently // emits this uncapture/return code in one place and jumps to it upon match failure. The difference // stems primarily from the return-at-each-location pattern resulting in cleaner / easier to read // source code, which is not an issue for RegexCompiler emitting IL instead of C#. // If the graph contained captures, undo any remaining to handle failed matches. if (expressionHasCaptures) { // while (base.Crawlpos() != 0) base.Uncapture(); Label finalReturnLabel = DefineLabel(); Br(finalReturnLabel); MarkLabel(originalDoneLabel); Label condition = DefineLabel(); Label body = DefineLabel(); Br(condition); MarkLabel(body); Ldthis(); Call(s_uncaptureMethod); MarkLabel(condition); Ldthis(); Call(s_crawlposMethod); Brtrue(body); // Done: MarkLabel(finalReturnLabel); } else { // Done: MarkLabel(originalDoneLabel); } // return false; Ldc(0); Ret(); // Generated code successfully. return; static bool IsCaseInsensitive(RegexNode node) => (node.Options & RegexOptions.IgnoreCase) != 0; // Slices the inputSpan starting at pos until end and stores it into slice. void SliceInputSpan() { // slice = inputSpan.Slice(pos); Ldloca(inputSpan); Ldloc(pos); Call(s_spanSliceIntMethod); Stloc(slice); } // Emits the sum of a constant and a value from a local. void EmitSum(int constant, LocalBuilder? local) { if (local == null) { Ldc(constant); } else if (constant == 0) { Ldloc(local); } else { Ldloc(local); Ldc(constant); Add(); } } // Emits a check that the span is large enough at the currently known static position to handle the required additional length. void EmitSpanLengthCheck(int requiredLength, LocalBuilder? dynamicRequiredLength = null) { // if ((uint)(sliceStaticPos + requiredLength + dynamicRequiredLength - 1) >= (uint)slice.Length) goto Done; Debug.Assert(requiredLength > 0); EmitSum(sliceStaticPos + requiredLength - 1, dynamicRequiredLength); Ldloca(slice); Call(s_spanGetLengthMethod); BgeUnFar(doneLabel); } // Emits code to get ref slice[sliceStaticPos] void EmitTextSpanOffset() { Ldloc(slice); Call(s_memoryMarshalGetReference); if (sliceStaticPos > 0) { Ldc(sliceStaticPos * sizeof(char)); Add(); } } // Adds the value of sliceStaticPos into the pos local, slices textspan by the corresponding amount, // and zeros out sliceStaticPos. void TransferSliceStaticPosToPos() { if (sliceStaticPos > 0) { // pos += sliceStaticPos; Ldloc(pos); Ldc(sliceStaticPos); Add(); Stloc(pos); // slice = slice.Slice(sliceStaticPos); Ldloca(slice); Ldc(sliceStaticPos); Call(s_spanSliceIntMethod); Stloc(slice); // sliceStaticPos = 0; sliceStaticPos = 0; } } // Emits the code for an alternation. void EmitAlternation(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Alternate, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() >= 2, $"Expected at least 2 children, found {node.ChildCount()}"); int childCount = node.ChildCount(); Debug.Assert(childCount >= 2); Label originalDoneLabel = doneLabel; // Both atomic and non-atomic are supported. While a parent RegexNode.Atomic node will itself // successfully prevent backtracking into this child node, we can emit better / cheaper code // for an Alternate when it is atomic, so we still take it into account here. Debug.Assert(node.Parent is not null); bool isAtomic = analysis.IsAtomicByAncestor(node); // Label to jump to when any branch completes successfully. 
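// Branches that fail instead jump to the next branch's label, or to the original doneLabel for the last branch.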
Label matchLabel = DefineLabel(); // Save off pos. We'll need to reset this each time a branch fails. // startingPos = pos; LocalBuilder startingPos = DeclareInt32(); Ldloc(pos); Stloc(startingPos); int startingTextSpanPos = sliceStaticPos; // We need to be able to undo captures in two situations: // - If a branch of the alternation itself contains captures, then if that branch // fails to match, any captures from that branch until that failure point need to // be uncaptured prior to jumping to the next branch. // - If the expression after the alternation contains captures, then failures // to match in those expressions could trigger backtracking back into the // alternation, and thus we need uncapture any of them. // As such, if the alternation contains captures or if it's not atomic, we need // to grab the current crawl position so we can unwind back to it when necessary. // We can do all of the uncapturing as part of falling through to the next branch. // If we fail in a branch, then such uncapturing will unwind back to the position // at the start of the alternation. If we fail after the alternation, and the // matched branch didn't contain any backtracking, then the failure will end up // jumping to the next branch, which will unwind the captures. And if we fail after // the alternation and the matched branch did contain backtracking, that backtracking // construct is responsible for unwinding back to its starting crawl position. If // it eventually ends up failing, that failure will result in jumping to the next branch // of the alternation, which will again dutifully unwind the remaining captures until // what they were at the start of the alternation. Of course, if there are no captures // anywhere in the regex, we don't have to do any of that. LocalBuilder? startingCapturePos = null; if (expressionHasCaptures && (analysis.MayContainCapture(node) || !isAtomic)) { // startingCapturePos = base.Crawlpos(); startingCapturePos = DeclareInt32(); Ldthis(); Call(s_crawlposMethod); Stloc(startingCapturePos); } // After executing the alternation, subsequent matching may fail, at which point execution // will need to backtrack to the alternation. We emit a branching table at the end of the // alternation, with a label that will be left as the "doneLabel" upon exiting emitting the // alternation. The branch table is populated with an entry for each branch of the alternation, // containing either the label for the last backtracking construct in the branch if such a construct // existed (in which case the doneLabel upon emitting that node will be different from before it) // or the label for the next branch. var labelMap = new Label[childCount]; Label backtrackLabel = DefineLabel(); for (int i = 0; i < childCount; i++) { bool isLastBranch = i == childCount - 1; Label nextBranch = default; if (!isLastBranch) { // Failure to match any branch other than the last one should result // in jumping to process the next branch. nextBranch = DefineLabel(); doneLabel = nextBranch; } else { // Failure to match the last branch is equivalent to failing to match // the whole alternation, which means those failures should jump to // what "doneLabel" was defined as when starting the alternation. doneLabel = originalDoneLabel; } // Emit the code for each branch. EmitNode(node.Child(i)); // Add this branch to the backtracking table. 
At this point, either the child // had backtracking constructs, in which case doneLabel points to the last one // and that's where we'll want to jump to, or it doesn't, in which case doneLabel // still points to the nextBranch, which similarly is where we'll want to jump to. if (!isAtomic) { // if (stackpos + 3 >= base.runstack.Length) Array.Resize(ref base.runstack, base.runstack.Length * 2); // base.runstack[stackpos++] = i; // base.runstack[stackpos++] = startingCapturePos; // base.runstack[stackpos++] = startingPos; EmitStackResizeIfNeeded(3); EmitStackPush(() => Ldc(i)); if (startingCapturePos is not null) { EmitStackPush(() => Ldloc(startingCapturePos)); } EmitStackPush(() => Ldloc(startingPos)); } labelMap[i] = doneLabel; // If we get here in the generated code, the branch completed successfully. // Before jumping to the end, we need to zero out sliceStaticPos, so that no // matter what the value is after the branch, whatever follows the alternate // will see the same sliceStaticPos. // pos += sliceStaticPos; // sliceStaticPos = 0; // goto matchLabel; TransferSliceStaticPosToPos(); BrFar(matchLabel); // Reset state for next branch and loop around to generate it. This includes // setting pos back to what it was at the beginning of the alternation, // updating slice to be the full length it was, and if there's a capture that // needs to be reset, uncapturing it. if (!isLastBranch) { // NextBranch: // pos = startingPos; // slice = inputSpan.Slice(pos); // while (base.Crawlpos() > startingCapturePos) base.Uncapture(); MarkLabel(nextBranch); Ldloc(startingPos); Stloc(pos); SliceInputSpan(); sliceStaticPos = startingTextSpanPos; if (startingCapturePos is not null) { EmitUncaptureUntil(startingCapturePos); } } } // We should never fall through to this location in the generated code. Either // a branch succeeded in matching and jumped to the end, or a branch failed in // matching and jumped to the next branch location. We only get to this code // if backtracking occurs and the code explicitly jumps here based on our setting // "doneLabel" to the label for this section. Thus, we only need to emit it if // something can backtrack to us, which can't happen if we're inside of an atomic // node. Thus, emit the backtracking section only if we're non-atomic. if (isAtomic) { doneLabel = originalDoneLabel; } else { doneLabel = backtrackLabel; MarkLabel(backtrackLabel); // startingPos = base.runstack[--stackpos]; // startingCapturePos = base.runstack[--stackpos]; // switch (base.runstack[--stackpos]) { ... } // branch number EmitStackPop(); Stloc(startingPos); if (startingCapturePos is not null) { EmitStackPop(); Stloc(startingCapturePos); } EmitStackPop(); Switch(labelMap); } // Successfully completed the alternate. MarkLabel(matchLabel); Debug.Assert(sliceStaticPos == 0); } // Emits the code to handle a backreference. void EmitBackreference(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Backreference, $"Unexpected type: {node.Kind}"); int capnum = RegexParser.MapCaptureNumber(node.M, _regexTree!.CaptureNumberSparseMapping); TransferSliceStaticPosToPos(); Label backreferenceEnd = DefineLabel(); // if (!base.IsMatched(capnum)) goto (ecmascript ? end : doneLabel); Ldthis(); Ldc(capnum); Call(s_isMatchedMethod); BrfalseFar((node.Options & RegexOptions.ECMAScript) == 0 ? 
doneLabel : backreferenceEnd); using RentedLocalBuilder matchLength = RentInt32Local(); using RentedLocalBuilder matchIndex = RentInt32Local(); using RentedLocalBuilder i = RentInt32Local(); // int matchLength = base.MatchLength(capnum); Ldthis(); Ldc(capnum); Call(s_matchLengthMethod); Stloc(matchLength); // if (slice.Length < matchLength) goto doneLabel; Ldloca(slice); Call(s_spanGetLengthMethod); Ldloc(matchLength); BltFar(doneLabel); // int matchIndex = base.MatchIndex(capnum); Ldthis(); Ldc(capnum); Call(s_matchIndexMethod); Stloc(matchIndex); Label condition = DefineLabel(); Label body = DefineLabel(); // for (int i = 0; ...) Ldc(0); Stloc(i); Br(condition); MarkLabel(body); // if (inputSpan[matchIndex + i] != slice[i]) goto doneLabel; Ldloca(inputSpan); Ldloc(matchIndex); Ldloc(i); Add(); Call(s_spanGetItemMethod); LdindU2(); if (IsCaseInsensitive(node)) { CallToLower(); } Ldloca(slice); Ldloc(i); Call(s_spanGetItemMethod); LdindU2(); if (IsCaseInsensitive(node)) { CallToLower(); } BneFar(doneLabel); // for (...; ...; i++) Ldloc(i); Ldc(1); Add(); Stloc(i); // for (...; i < matchLength; ...) MarkLabel(condition); Ldloc(i); Ldloc(matchLength); Blt(body); // pos += matchLength; Ldloc(pos); Ldloc(matchLength); Add(); Stloc(pos); SliceInputSpan(); MarkLabel(backreferenceEnd); } // Emits the code for an if(backreference)-then-else conditional. void EmitBackreferenceConditional(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.BackreferenceConditional, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 2, $"Expected 2 children, found {node.ChildCount()}"); bool isAtomic = analysis.IsAtomicByAncestor(node); // We're branching in a complicated fashion. Make sure sliceStaticPos is 0. TransferSliceStaticPosToPos(); // Get the capture number to test. int capnum = RegexParser.MapCaptureNumber(node.M, _regexTree!.CaptureNumberSparseMapping); // Get the "yes" branch and the "no" branch. The "no" branch is optional in syntax and is thus // somewhat likely to be Empty. RegexNode yesBranch = node.Child(0); RegexNode? noBranch = node.Child(1) is { Kind: not RegexNodeKind.Empty } childNo ? childNo : null; Label originalDoneLabel = doneLabel; Label refNotMatched = DefineLabel(); Label endConditional = DefineLabel(); // As with alternations, we have potentially multiple branches, each of which may contain // backtracking constructs, but the expression after the conditional needs a single target // to backtrack to. So, we expose a single Backtrack label and track which branch was // followed in this resumeAt local. LocalBuilder resumeAt = DeclareInt32(); // if (!base.IsMatched(capnum)) goto refNotMatched; Ldthis(); Ldc(capnum); Call(s_isMatchedMethod); BrfalseFar(refNotMatched); // The specified capture was captured. Run the "yes" branch. // If it successfully matches, jump to the end. EmitNode(yesBranch); TransferSliceStaticPosToPos(); Label postYesDoneLabel = doneLabel; if (!isAtomic && postYesDoneLabel != originalDoneLabel) { // resumeAt = 0; Ldc(0); Stloc(resumeAt); } bool needsEndConditional = postYesDoneLabel != originalDoneLabel || noBranch is not null; if (needsEndConditional) { // goto endConditional; BrFar(endConditional); } MarkLabel(refNotMatched); Label postNoDoneLabel = originalDoneLabel; if (noBranch is not null) { // Output the no branch. 
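// doneLabel is reset first so that a failure inside the "no" branch backtracks out of the conditional rather than into the "yes" branch's backtracking.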
doneLabel = originalDoneLabel; EmitNode(noBranch); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch postNoDoneLabel = doneLabel; if (!isAtomic && postNoDoneLabel != originalDoneLabel) { // resumeAt = 1; Ldc(1); Stloc(resumeAt); } } else { // There's only a yes branch. If it's going to cause us to output a backtracking // label but code may not end up taking the yes branch path, we need to emit a resumeAt // that will cause the backtracking to immediately pass through this node. if (!isAtomic && postYesDoneLabel != originalDoneLabel) { // resumeAt = 2; Ldc(2); Stloc(resumeAt); } } if (isAtomic || (postYesDoneLabel == originalDoneLabel && postNoDoneLabel == originalDoneLabel)) { // We're atomic by our parent, so even if either child branch has backtracking constructs, // we don't need to emit any backtracking logic in support, as nothing will backtrack in. // Instead, we just ensure we revert back to the original done label so that any backtracking // skips over this node. doneLabel = originalDoneLabel; if (needsEndConditional) { MarkLabel(endConditional); } } else { // Subsequent expressions might try to backtrack to here, so output a backtracking map based on resumeAt. // Skip the backtracking section // goto endConditional; Debug.Assert(needsEndConditional); Br(endConditional); // Backtrack section Label backtrack = DefineLabel(); doneLabel = backtrack; MarkLabel(backtrack); // Pop from the stack the branch that was used and jump back to its backtracking location. // resumeAt = base.runstack[--stackpos]; EmitStackPop(); Stloc(resumeAt); if (postYesDoneLabel != originalDoneLabel) { // if (resumeAt == 0) goto postIfDoneLabel; Ldloc(resumeAt); Ldc(0); BeqFar(postYesDoneLabel); } if (postNoDoneLabel != originalDoneLabel) { // if (resumeAt == 1) goto postNoDoneLabel; Ldloc(resumeAt); Ldc(1); BeqFar(postNoDoneLabel); } // goto originalDoneLabel; BrFar(originalDoneLabel); if (needsEndConditional) { MarkLabel(endConditional); } // if (stackpos + 1 >= base.runstack.Length) Array.Resize(ref base.runstack, base.runstack.Length * 2); // base.runstack[stackpos++] = resumeAt; EmitStackResizeIfNeeded(1); EmitStackPush(() => Ldloc(resumeAt)); } } // Emits the code for an if(expression)-then-else conditional. void EmitExpressionConditional(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.ExpressionConditional, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 3, $"Expected 3 children, found {node.ChildCount()}"); bool isAtomic = analysis.IsAtomicByAncestor(node); // We're branching in a complicated fashion. Make sure sliceStaticPos is 0. TransferSliceStaticPosToPos(); // The first child node is the condition expression. If this matches, then we branch to the "yes" branch. // If it doesn't match, then we branch to the optional "no" branch if it exists, or simply skip the "yes" // branch, otherwise. The condition is treated as a positive lookahead. RegexNode condition = node.Child(0); // Get the "yes" branch and the "no" branch. The "no" branch is optional in syntax and is thus // somewhat likely to be Empty. RegexNode yesBranch = node.Child(1); RegexNode? noBranch = node.Child(2) is { Kind: not RegexNodeKind.Empty } childNo ? 
childNo : null; Label originalDoneLabel = doneLabel; Label expressionNotMatched = DefineLabel(); Label endConditional = DefineLabel(); // As with alternations, we have potentially multiple branches, each of which may contain // backtracking constructs, but the expression after the condition needs a single target // to backtrack to. So, we expose a single Backtrack label and track which branch was // followed in this resumeAt local. LocalBuilder? resumeAt = null; if (!isAtomic) { resumeAt = DeclareInt32(); } // If the condition expression has captures, we'll need to uncapture them in the case of no match. LocalBuilder? startingCapturePos = null; if (analysis.MayContainCapture(condition)) { // int startingCapturePos = base.Crawlpos(); startingCapturePos = DeclareInt32(); Ldthis(); Call(s_crawlposMethod); Stloc(startingCapturePos); } // Emit the condition expression. Route any failures to after the yes branch. This code is almost // the same as for a positive lookahead; however, a positive lookahead only needs to reset the position // on a successful match, as a failed match fails the whole expression; here, we need to reset the // position on completion, regardless of whether the match is successful or not. doneLabel = expressionNotMatched; // Save off pos. We'll need to reset this upon successful completion of the lookahead. // startingPos = pos; LocalBuilder startingPos = DeclareInt32(); Ldloc(pos); Stloc(startingPos); int startingSliceStaticPos = sliceStaticPos; // Emit the child. The condition expression is a zero-width assertion, which is atomic, // so prevent backtracking into it. EmitNode(condition); doneLabel = originalDoneLabel; // After the condition completes successfully, reset the text positions. // Do not reset captures, which persist beyond the lookahead. // pos = startingPos; // slice = inputSpan.Slice(pos); Ldloc(startingPos); Stloc(pos); SliceInputSpan(); sliceStaticPos = startingSliceStaticPos; // The expression matched. Run the "yes" branch. If it successfully matches, jump to the end. EmitNode(yesBranch); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch Label postYesDoneLabel = doneLabel; if (!isAtomic && postYesDoneLabel != originalDoneLabel) { // resumeAt = 0; Ldc(0); Stloc(resumeAt!); } // goto endConditional; BrFar(endConditional); // After the condition completes unsuccessfully, reset the text positions // _and_ reset captures, which should not persist when the whole expression failed. // pos = startingPos; MarkLabel(expressionNotMatched); Ldloc(startingPos); Stloc(pos); SliceInputSpan(); sliceStaticPos = startingSliceStaticPos; if (startingCapturePos is not null) { EmitUncaptureUntil(startingCapturePos); } Label postNoDoneLabel = originalDoneLabel; if (noBranch is not null) { // Output the no branch. doneLabel = originalDoneLabel; EmitNode(noBranch); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch postNoDoneLabel = doneLabel; if (!isAtomic && postNoDoneLabel != originalDoneLabel) { // resumeAt = 1; Ldc(1); Stloc(resumeAt!); } } else { // There's only a yes branch. If it's going to cause us to output a backtracking // label but code may not end up taking the yes branch path, we need to emit a resumeAt // that will cause the backtracking to immediately pass through this node. 
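// A resumeAt value of 2 has no entry in the backtracking dispatch below, so backtracking falls through to originalDoneLabel.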
if (!isAtomic && postYesDoneLabel != originalDoneLabel) { // resumeAt = 2; Ldc(2); Stloc(resumeAt!); } } // If either the yes branch or the no branch contained backtracking, subsequent expressions // might try to backtrack to here, so output a backtracking map based on resumeAt. if (isAtomic || (postYesDoneLabel == originalDoneLabel && postNoDoneLabel == originalDoneLabel)) { // EndConditional: doneLabel = originalDoneLabel; MarkLabel(endConditional); } else { Debug.Assert(resumeAt is not null); // Skip the backtracking section. BrFar(endConditional); Label backtrack = DefineLabel(); doneLabel = backtrack; MarkLabel(backtrack); // resumeAt = StackPop(); EmitStackPop(); Stloc(resumeAt); if (postYesDoneLabel != originalDoneLabel) { // if (resumeAt == 0) goto postYesDoneLabel; Ldloc(resumeAt); Ldc(0); BeqFar(postYesDoneLabel); } if (postNoDoneLabel != originalDoneLabel) { // if (resumeAt == 1) goto postNoDoneLabel; Ldloc(resumeAt); Ldc(1); BeqFar(postNoDoneLabel); } // goto postConditionalDoneLabel; BrFar(originalDoneLabel); // EndConditional: MarkLabel(endConditional); // if (stackpos + 1 >= base.runstack.Length) Array.Resize(ref base.runstack, base.runstack.Length * 2); // base.runstack[stackpos++] = resumeAt; EmitStackResizeIfNeeded(1); EmitStackPush(() => Ldloc(resumeAt!)); } } // Emits the code for a Capture node. void EmitCapture(RegexNode node, RegexNode? subsequent = null) { Debug.Assert(node.Kind is RegexNodeKind.Capture, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); int capnum = RegexParser.MapCaptureNumber(node.M, _regexTree!.CaptureNumberSparseMapping); int uncapnum = RegexParser.MapCaptureNumber(node.N, _regexTree.CaptureNumberSparseMapping); bool isAtomic = analysis.IsAtomicByAncestor(node); // pos += sliceStaticPos; // slice = slice.Slice(sliceStaticPos); // startingPos = pos; TransferSliceStaticPosToPos(); LocalBuilder startingPos = DeclareInt32(); Ldloc(pos); Stloc(startingPos); RegexNode child = node.Child(0); if (uncapnum != -1) { // if (!IsMatched(uncapnum)) goto doneLabel; Ldthis(); Ldc(uncapnum); Call(s_isMatchedMethod); BrfalseFar(doneLabel); } // Emit child node. Label originalDoneLabel = doneLabel; EmitNode(child, subsequent); bool childBacktracks = doneLabel != originalDoneLabel; // pos += sliceStaticPos; // slice = slice.Slice(sliceStaticPos); TransferSliceStaticPosToPos(); if (uncapnum == -1) { // Capture(capnum, startingPos, pos); Ldthis(); Ldc(capnum); Ldloc(startingPos); Ldloc(pos); Call(s_captureMethod); } else { // TransferCapture(capnum, uncapnum, startingPos, pos); Ldthis(); Ldc(capnum); Ldc(uncapnum); Ldloc(startingPos); Ldloc(pos); Call(s_transferCaptureMethod); } if (isAtomic || !childBacktracks) { // If the capture is atomic and nothing can backtrack into it, we're done. // Similarly, even if the capture isn't atomic, if the captured expression // doesn't do any backtracking, we're done. doneLabel = originalDoneLabel; } else { // We're not atomic and the child node backtracks. When it does, we need // to ensure that the starting position for the capture is appropriately // reset to what it was initially (it could have changed as part of being // in a loop or similar). So, we emit a backtracking section that // pushes/pops the starting position before falling through. 
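// On backtrack, startingPos is popped back off the runstack so the capture's starting position is restored before jumping to the child's backtracking label.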
// if (stackpos + 1 >= base.runstack.Length) Array.Resize(ref base.runstack, base.runstack.Length * 2); // base.runstack[stackpos++] = startingPos; EmitStackResizeIfNeeded(1); EmitStackPush(() => Ldloc(startingPos)); // Skip past the backtracking section // goto backtrackingEnd; Label backtrackingEnd = DefineLabel(); Br(backtrackingEnd); // Emit a backtracking section that restores the capture's state and then jumps to the previous done label Label backtrack = DefineLabel(); MarkLabel(backtrack); EmitStackPop(); Stloc(startingPos); if (!childBacktracks) { // pos = startingPos Ldloc(startingPos); Stloc(pos); SliceInputSpan(); } // goto doneLabel; BrFar(doneLabel); doneLabel = backtrack; MarkLabel(backtrackingEnd); } } // Emits code to unwind the capture stack until the crawl position specified in the provided local. void EmitUncaptureUntil(LocalBuilder startingCapturePos) { Debug.Assert(startingCapturePos != null); // while (base.Crawlpos() > startingCapturePos) base.Uncapture(); Label condition = DefineLabel(); Label body = DefineLabel(); Br(condition); MarkLabel(body); Ldthis(); Call(s_uncaptureMethod); MarkLabel(condition); Ldthis(); Call(s_crawlposMethod); Ldloc(startingCapturePos); Bgt(body); } // Emits the code to handle a positive lookahead assertion. void EmitPositiveLookaheadAssertion(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.PositiveLookaround, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); // Save off pos. We'll need to reset this upon successful completion of the lookahead. // startingPos = pos; LocalBuilder startingPos = DeclareInt32(); Ldloc(pos); Stloc(startingPos); int startingTextSpanPos = sliceStaticPos; // Emit the child. RegexNode child = node.Child(0); if (analysis.MayBacktrack(child)) { // Lookarounds are implicitly atomic, so we need to emit the node as atomic if it might backtrack. EmitAtomic(node, null); } else { EmitNode(child); } // After the child completes successfully, reset the text positions. // Do not reset captures, which persist beyond the lookahead. // pos = startingPos; // slice = inputSpan.Slice(pos); Ldloc(startingPos); Stloc(pos); SliceInputSpan(); sliceStaticPos = startingTextSpanPos; } // Emits the code to handle a negative lookahead assertion. void EmitNegativeLookaheadAssertion(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.NegativeLookaround, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); Label originalDoneLabel = doneLabel; // Save off pos. We'll need to reset this upon successful completion of the lookahead. // startingPos = pos; LocalBuilder startingPos = DeclareInt32(); Ldloc(pos); Stloc(startingPos); int startingTextSpanPos = sliceStaticPos; Label negativeLookaheadDoneLabel = DefineLabel(); doneLabel = negativeLookaheadDoneLabel; // Emit the child. RegexNode child = node.Child(0); if (analysis.MayBacktrack(child)) { // Lookarounds are implicitly atomic, so we need to emit the node as atomic if it might backtrack. EmitAtomic(node, null); } else { EmitNode(child); } // If the generated code ends up here, it matched the lookahead, which actually // means failure for a _negative_ lookahead, so we need to jump to the original done. // goto originalDoneLabel; BrFar(originalDoneLabel); // Failures (success for a negative lookahead) jump here. 
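// Lookarounds are zero-width and implicitly atomic: doneLabel is restored to originalDoneLabel below, so nothing after this node can backtrack into the child.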
MarkLabel(negativeLookaheadDoneLabel); if (doneLabel == negativeLookaheadDoneLabel) { doneLabel = originalDoneLabel; } // After the child completes in failure (success for negative lookahead), reset the text positions. // pos = startingPos; Ldloc(startingPos); Stloc(pos); SliceInputSpan(); sliceStaticPos = startingTextSpanPos; doneLabel = originalDoneLabel; } // Emits the code for the node. void EmitNode(RegexNode node, RegexNode? subsequent = null, bool emitLengthChecksIfRequired = true) { if (!StackHelper.TryEnsureSufficientExecutionStack()) { StackHelper.CallOnEmptyStack(EmitNode, node, subsequent, emitLengthChecksIfRequired); return; } switch (node.Kind) { case RegexNodeKind.Beginning: case RegexNodeKind.Start: case RegexNodeKind.Bol: case RegexNodeKind.Eol: case RegexNodeKind.End: case RegexNodeKind.EndZ: EmitAnchors(node); break; case RegexNodeKind.Boundary: case RegexNodeKind.NonBoundary: case RegexNodeKind.ECMABoundary: case RegexNodeKind.NonECMABoundary: EmitBoundary(node); break; case RegexNodeKind.Multi: EmitMultiChar(node, emitLengthChecksIfRequired); break; case RegexNodeKind.One: case RegexNodeKind.Notone: case RegexNodeKind.Set: EmitSingleChar(node, emitLengthChecksIfRequired); break; case RegexNodeKind.Oneloop: case RegexNodeKind.Notoneloop: case RegexNodeKind.Setloop: EmitSingleCharLoop(node, subsequent, emitLengthChecksIfRequired); break; case RegexNodeKind.Onelazy: case RegexNodeKind.Notonelazy: case RegexNodeKind.Setlazy: EmitSingleCharLazy(node, subsequent, emitLengthChecksIfRequired); break; case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Setloopatomic: EmitSingleCharAtomicLoop(node); break; case RegexNodeKind.Loop: EmitLoop(node); break; case RegexNodeKind.Lazyloop: EmitLazy(node); break; case RegexNodeKind.Alternate: EmitAlternation(node); break; case RegexNodeKind.Concatenate: EmitConcatenation(node, subsequent, emitLengthChecksIfRequired); break; case RegexNodeKind.Atomic: EmitAtomic(node, subsequent); break; case RegexNodeKind.Backreference: EmitBackreference(node); break; case RegexNodeKind.BackreferenceConditional: EmitBackreferenceConditional(node); break; case RegexNodeKind.ExpressionConditional: EmitExpressionConditional(node); break; case RegexNodeKind.Capture: EmitCapture(node, subsequent); break; case RegexNodeKind.PositiveLookaround: EmitPositiveLookaheadAssertion(node); break; case RegexNodeKind.NegativeLookaround: EmitNegativeLookaheadAssertion(node); break; case RegexNodeKind.Nothing: BrFar(doneLabel); break; case RegexNodeKind.Empty: // Emit nothing. break; case RegexNodeKind.UpdateBumpalong: EmitUpdateBumpalong(node); break; default: Debug.Fail($"Unexpected node type: {node.Kind}"); break; } } // Emits the node for an atomic. void EmitAtomic(RegexNode node, RegexNode? subsequent) { Debug.Assert(node.Kind is RegexNodeKind.Atomic or RegexNodeKind.PositiveLookaround or RegexNodeKind.NegativeLookaround, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); RegexNode child = node.Child(0); if (!analysis.MayBacktrack(child)) { // If the child has no backtracking, the atomic is a nop and we can just skip it. // Note that the source generator equivalent for this is in the top-level EmitNode, in order to avoid // outputting some extra comments and scopes. As such formatting isn't a concern for the compiler, // the logic is instead here in EmitAtomic. 
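// A non-backtracking child leaves nothing on the backtracking stack, so no stackpos bookkeeping or label resetting is needed here.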
EmitNode(child, subsequent); return; } // Grab the current done label and the current backtracking position. The purpose of the atomic node // is to ensure that nodes after it that might backtrack skip over the atomic, which means after // rendering the atomic's child, we need to reset the label so that subsequent backtracking doesn't // see any label left set by the atomic's child. We also need to reset the backtracking stack position // so that the state on the stack remains consistent. Label originalDoneLabel = doneLabel; // int startingStackpos = stackpos; using RentedLocalBuilder startingStackpos = RentInt32Local(); Ldloc(stackpos); Stloc(startingStackpos); // Emit the child. EmitNode(child, subsequent); // Reset the stack position and done label. // stackpos = startingStackpos; Ldloc(startingStackpos); Stloc(stackpos); doneLabel = originalDoneLabel; } // Emits the code to handle updating base.runtextpos to pos in response to // an UpdateBumpalong node. This is used when we want to inform the scan loop that // it should bump from this location rather than from the original location. void EmitUpdateBumpalong(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.UpdateBumpalong, $"Unexpected type: {node.Kind}"); // if (base.runtextpos < pos) // { // base.runtextpos = pos; // } TransferSliceStaticPosToPos(); Ldthisfld(s_runtextposField); Ldloc(pos); Label skipUpdate = DefineLabel(); Bge(skipUpdate); Ldthis(); Ldloc(pos); Stfld(s_runtextposField); MarkLabel(skipUpdate); } // Emits code for a concatenation void EmitConcatenation(RegexNode node, RegexNode? subsequent, bool emitLengthChecksIfRequired) { Debug.Assert(node.Kind is RegexNodeKind.Concatenate, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() >= 2, $"Expected at least 2 children, found {node.ChildCount()}"); // Emit the code for each child one after the other. int childCount = node.ChildCount(); for (int i = 0; i < childCount; i++) { // If we can find a subsequence of fixed-length children, we can emit a length check once for that sequence // and then skip the individual length checks for each. if (emitLengthChecksIfRequired && node.TryGetJoinableLengthCheckChildRange(i, out int requiredLength, out int exclusiveEnd)) { EmitSpanLengthCheck(requiredLength); for (; i < exclusiveEnd; i++) { EmitNode(node.Child(i), GetSubsequent(i, node, subsequent), emitLengthChecksIfRequired: false); } i--; continue; } EmitNode(node.Child(i), GetSubsequent(i, node, subsequent)); } // Gets the node to treat as the subsequent one to node.Child(index) static RegexNode? GetSubsequent(int index, RegexNode node, RegexNode? subsequent) { int childCount = node.ChildCount(); for (int i = index + 1; i < childCount; i++) { RegexNode next = node.Child(i); if (next.Kind is not RegexNodeKind.UpdateBumpalong) // skip node types that don't have a semantic impact { return next; } } return subsequent; } } // Emits the code to handle a single-character match. void EmitSingleChar(RegexNode node, bool emitLengthCheck = true, LocalBuilder? offset = null) { Debug.Assert(node.IsOneFamily || node.IsNotoneFamily || node.IsSetFamily, $"Unexpected type: {node.Kind}"); // This only emits a single check, but it's called from the looping constructs in a loop // to generate the code for a single check, so we check for each "family" (one, notone, set) // rather than only for the specific single character nodes. 
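            // As a concrete sketch (assuming a case-sensitive One node for 'a' and sliceStaticPos == 2),
            // the emitted code behaves roughly like:
            //     if ((uint)2 >= (uint)slice.Length || slice[2] != 'a') goto doneLabel;
            // For Set nodes the loaded character is instead routed through EmitMatchCharacterClass.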
// if ((uint)(sliceStaticPos + offset) >= slice.Length || slice[sliceStaticPos + offset] != ch) goto Done; if (emitLengthCheck) { EmitSpanLengthCheck(1, offset); } Ldloca(slice); EmitSum(sliceStaticPos, offset); Call(s_spanGetItemMethod); LdindU2(); if (node.IsSetFamily) { EmitMatchCharacterClass(node.Str!, IsCaseInsensitive(node)); BrfalseFar(doneLabel); } else { if (IsCaseInsensitive(node)) { CallToLower(); } Ldc(node.Ch); if (node.IsOneFamily) { BneFar(doneLabel); } else // IsNotoneFamily { BeqFar(doneLabel); } } sliceStaticPos++; } // Emits the code to handle a boundary check on a character. void EmitBoundary(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Boundary or RegexNodeKind.NonBoundary or RegexNodeKind.ECMABoundary or RegexNodeKind.NonECMABoundary, $"Unexpected type: {node.Kind}"); // if (!IsBoundary(inputSpan, pos + sliceStaticPos)) goto doneLabel; Ldthis(); Ldloc(inputSpan); Ldloc(pos); if (sliceStaticPos > 0) { Ldc(sliceStaticPos); Add(); } switch (node.Kind) { case RegexNodeKind.Boundary: Call(s_isBoundaryMethod); BrfalseFar(doneLabel); break; case RegexNodeKind.NonBoundary: Call(s_isBoundaryMethod); BrtrueFar(doneLabel); break; case RegexNodeKind.ECMABoundary: Call(s_isECMABoundaryMethod); BrfalseFar(doneLabel); break; default: Debug.Assert(node.Kind == RegexNodeKind.NonECMABoundary); Call(s_isECMABoundaryMethod); BrtrueFar(doneLabel); break; } } // Emits the code to handle various anchors. void EmitAnchors(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Beginning or RegexNodeKind.Start or RegexNodeKind.Bol or RegexNodeKind.End or RegexNodeKind.EndZ or RegexNodeKind.Eol, $"Unexpected type: {node.Kind}"); Debug.Assert(sliceStaticPos >= 0); switch (node.Kind) { case RegexNodeKind.Beginning: case RegexNodeKind.Start: if (sliceStaticPos > 0) { // If we statically know we've already matched part of the regex, there's no way we're at the // beginning or start, as we've already progressed past it. 
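                    // (For example, once the current concatenation has already matched one or more characters
                    // at a statically known offset, a subsequent \A or \G can never succeed, so this is emitted
                    // as an unconditional jump to doneLabel.)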
BrFar(doneLabel); } else { // if (pos > 0/start) goto doneLabel; Ldloc(pos); if (node.Kind == RegexNodeKind.Beginning) { Ldc(0); } else { Ldthisfld(s_runtextstartField); } BneFar(doneLabel); } break; case RegexNodeKind.Bol: if (sliceStaticPos > 0) { // if (slice[sliceStaticPos - 1] != '\n') goto doneLabel; Ldloca(slice); Ldc(sliceStaticPos - 1); Call(s_spanGetItemMethod); LdindU2(); Ldc('\n'); BneFar(doneLabel); } else { // We can't use our slice in this case, because we'd need to access slice[-1], so we access the runtext field directly: // if (pos > 0 && base.runtext[pos - 1] != '\n') goto doneLabel; Label success = DefineLabel(); Ldloc(pos); Ldc(0); Ble(success); Ldloca(inputSpan); Ldloc(pos); Ldc(1); Sub(); Call(s_spanGetItemMethod); LdindU2(); Ldc('\n'); BneFar(doneLabel); MarkLabel(success); } break; case RegexNodeKind.End: // if (sliceStaticPos < slice.Length) goto doneLabel; Ldc(sliceStaticPos); Ldloca(slice); Call(s_spanGetLengthMethod); BltUnFar(doneLabel); break; case RegexNodeKind.EndZ: // if (sliceStaticPos < slice.Length - 1) goto doneLabel; Ldc(sliceStaticPos); Ldloca(slice); Call(s_spanGetLengthMethod); Ldc(1); Sub(); BltFar(doneLabel); goto case RegexNodeKind.Eol; case RegexNodeKind.Eol: // if (sliceStaticPos < slice.Length && slice[sliceStaticPos] != '\n') goto doneLabel; { Label success = DefineLabel(); Ldc(sliceStaticPos); Ldloca(slice); Call(s_spanGetLengthMethod); BgeUn(success); Ldloca(slice); Ldc(sliceStaticPos); Call(s_spanGetItemMethod); LdindU2(); Ldc('\n'); BneFar(doneLabel); MarkLabel(success); } break; } } // Emits the code to handle a multiple-character match. void EmitMultiChar(RegexNode node, bool emitLengthCheck) { Debug.Assert(node.Kind is RegexNodeKind.Multi, $"Unexpected type: {node.Kind}"); EmitMultiCharString(node.Str!, IsCaseInsensitive(node), emitLengthCheck); } void EmitMultiCharString(string str, bool caseInsensitive, bool emitLengthCheck) { Debug.Assert(str.Length >= 2); if (caseInsensitive) // StartsWith(..., XxIgnoreCase) won't necessarily be the same as char-by-char comparison { // This case should be relatively rare. It will only occur with IgnoreCase and a series of non-ASCII characters. if (emitLengthCheck) { EmitSpanLengthCheck(str.Length); } foreach (char c in str) { // if (c != slice[sliceStaticPos++]) goto doneLabel; EmitTextSpanOffset(); sliceStaticPos++; LdindU2(); CallToLower(); Ldc(c); BneFar(doneLabel); } } else { // if (!slice.Slice(sliceStaticPos).StartsWith("...") goto doneLabel; Ldloca(slice); Ldc(sliceStaticPos); Call(s_spanSliceIntMethod); Ldstr(str); Call(s_stringAsSpanMethod); Call(s_spanStartsWith); BrfalseFar(doneLabel); sliceStaticPos += str.Length; } } // Emits the code to handle a backtracking, single-character loop. void EmitSingleCharLoop(RegexNode node, RegexNode? subsequent = null, bool emitLengthChecksIfRequired = true) { Debug.Assert(node.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop, $"Unexpected type: {node.Kind}"); // If this is actually a repeater, emit that instead; no backtracking necessary. if (node.M == node.N) { EmitSingleCharRepeater(node, emitLengthChecksIfRequired); return; } // Emit backtracking around an atomic single char loop. We can then implement the backtracking // as an afterthought, since we know exactly how many characters are accepted by each iteration // of the wrapped loop (1) and that there's nothing captured by the loop. 
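            // Illustrative shape of what gets emitted (a sketch, not the verbatim IL):
            //     int startingPos = pos;
            //     ...match greedily/atomically as many chars as possible...
            //     int endingPos = pos;
            //     startingPos += node.M;
            //     goto endLoop;
            //   backtrack:
            //     pop startingPos/endingPos (and capture state) from the backtracking stack
            //     if (startingPos >= endingPos) goto doneLabel;
            //     endingPos--;                     // give back one char (or jump back to a following literal)
            //     pos = endingPos; slice = inputSpan.Slice(pos);
            //   endLoop:
            //     push startingPos/endingPos (and capture state) onto the backtracking stack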
Debug.Assert(node.M < node.N); Label backtrackingLabel = DefineLabel(); Label endLoop = DefineLabel(); LocalBuilder startingPos = DeclareInt32(); LocalBuilder endingPos = DeclareInt32(); LocalBuilder? capturepos = expressionHasCaptures ? DeclareInt32() : null; // We're about to enter a loop, so ensure our text position is 0. TransferSliceStaticPosToPos(); // Grab the current position, then emit the loop as atomic, and then // grab the current position again. Even though we emit the loop without // knowledge of backtracking, we can layer it on top by just walking back // through the individual characters (a benefit of the loop matching exactly // one character per iteration, no possible captures within the loop, etc.) // int startingPos = pos; Ldloc(pos); Stloc(startingPos); EmitSingleCharAtomicLoop(node); // pos += sliceStaticPos; // int endingPos = pos; TransferSliceStaticPosToPos(); Ldloc(pos); Stloc(endingPos); // int capturepos = base.Crawlpos(); if (capturepos is not null) { Ldthis(); Call(s_crawlposMethod); Stloc(capturepos); } // startingPos += node.M; if (node.M > 0) { Ldloc(startingPos); Ldc(node.M); Add(); Stloc(startingPos); } // goto endLoop; BrFar(endLoop); // Backtracking section. Subsequent failures will jump to here, at which // point we decrement the matched count as long as it's above the minimum // required, and try again by flowing to everything that comes after this. MarkLabel(backtrackingLabel); if (capturepos is not null) { // capturepos = base.runstack[--stackpos]; // while (base.Crawlpos() > capturepos) base.Uncapture(); EmitStackPop(); Stloc(capturepos); EmitUncaptureUntil(capturepos); } // endingPos = base.runstack[--stackpos]; // startingPos = base.runstack[--stackpos]; EmitStackPop(); Stloc(endingPos); EmitStackPop(); Stloc(startingPos); // if (startingPos >= endingPos) goto doneLabel; Ldloc(startingPos); Ldloc(endingPos); BgeFar(doneLabel); if (subsequent?.FindStartingLiteral() is ValueTuple<char, string?, string?> literal) { // endingPos = inputSpan.Slice(startingPos, Math.Min(inputSpan.Length, endingPos + literal.Length - 1) - startingPos).LastIndexOf(literal); // if (endingPos < 0) // { // goto doneLabel; // } Ldloca(inputSpan); Ldloc(startingPos); if (literal.Item2 is not null) { Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldloc(endingPos); Ldc(literal.Item2.Length - 1); Add(); Call(s_mathMinIntInt); Ldloc(startingPos); Sub(); Call(s_spanSliceIntIntMethod); Ldstr(literal.Item2); Call(s_stringAsSpanMethod); Call(s_spanLastIndexOfSpan); } else { Ldloc(endingPos); Ldloc(startingPos); Sub(); Call(s_spanSliceIntIntMethod); if (literal.Item3 is not null) { switch (literal.Item3.Length) { case 2: Ldc(literal.Item3[0]); Ldc(literal.Item3[1]); Call(s_spanLastIndexOfAnyCharChar); break; case 3: Ldc(literal.Item3[0]); Ldc(literal.Item3[1]); Ldc(literal.Item3[2]); Call(s_spanLastIndexOfAnyCharCharChar); break; default: Ldstr(literal.Item3); Call(s_stringAsSpanMethod); Call(s_spanLastIndexOfAnySpan); break; } } else { Ldc(literal.Item1); Call(s_spanLastIndexOfChar); } } Stloc(endingPos); Ldloc(endingPos); Ldc(0); BltFar(doneLabel); // endingPos += startingPos; Ldloc(endingPos); Ldloc(startingPos); Add(); Stloc(endingPos); } else { // endingPos--; Ldloc(endingPos); Ldc(1); Sub(); Stloc(endingPos); } // pos = endingPos; Ldloc(endingPos); Stloc(pos); // slice = inputSpan.Slice(pos); SliceInputSpan(); MarkLabel(endLoop); EmitStackResizeIfNeeded(expressionHasCaptures ? 
3 : 2); EmitStackPush(() => Ldloc(startingPos)); EmitStackPush(() => Ldloc(endingPos)); if (capturepos is not null) { EmitStackPush(() => Ldloc(capturepos!)); } doneLabel = backtrackingLabel; // leave set to the backtracking label for all subsequent nodes } void EmitSingleCharLazy(RegexNode node, RegexNode? subsequent = null, bool emitLengthChecksIfRequired = true) { Debug.Assert(node.Kind is RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy, $"Unexpected type: {node.Kind}"); // Emit the min iterations as a repeater. Any failures here don't necessitate backtracking, // as the lazy itself failed to match, and there's no backtracking possible by the individual // characters/iterations themselves. if (node.M > 0) { EmitSingleCharRepeater(node, emitLengthChecksIfRequired); } // If the whole thing was actually that repeater, we're done. Similarly, if this is actually an atomic // lazy loop, nothing will ever backtrack into this node, so we never need to iterate more than the minimum. if (node.M == node.N || analysis.IsAtomicByAncestor(node)) { return; } Debug.Assert(node.M < node.N); // We now need to match one character at a time, each time allowing the remainder of the expression // to try to match, and only matching another character if the subsequent expression fails to match. // We're about to enter a loop, so ensure our text position is 0. TransferSliceStaticPosToPos(); // If the loop isn't unbounded, track the number of iterations and the max number to allow. LocalBuilder? iterationCount = null; int? maxIterations = null; if (node.N != int.MaxValue) { maxIterations = node.N - node.M; // int iterationCount = 0; iterationCount = DeclareInt32(); Ldc(0); Stloc(iterationCount); } // Track the current crawl position. Upon backtracking, we'll unwind any captures beyond this point. LocalBuilder? capturepos = expressionHasCaptures ? DeclareInt32() : null; // Track the current pos. Each time we backtrack, we'll reset to the stored position, which // is also incremented each time we match another character in the loop. // int startingPos = pos; LocalBuilder startingPos = DeclareInt32(); Ldloc(pos); Stloc(startingPos); // Skip the backtracking section for the initial subsequent matching. We've already matched the // minimum number of iterations, which means we can successfully match with zero additional iterations. // goto endLoopLabel; Label endLoopLabel = DefineLabel(); BrFar(endLoopLabel); // Backtracking section. Subsequent failures will jump to here. Label backtrackingLabel = DefineLabel(); MarkLabel(backtrackingLabel); // Uncapture any captures if the expression has any. It's possible the captures it has // are before this node, in which case this is wasted effort, but still functionally correct. if (capturepos is not null) { // while (base.Crawlpos() > capturepos) base.Uncapture(); EmitUncaptureUntil(capturepos); } // If there's a max number of iterations, see if we've exceeded the maximum number of characters // to match. If we haven't, increment the iteration count. if (maxIterations is not null) { // if (iterationCount >= maxIterations) goto doneLabel; Ldloc(iterationCount!); Ldc(maxIterations.Value); BgeFar(doneLabel); // iterationCount++; Ldloc(iterationCount!); Ldc(1); Add(); Stloc(iterationCount!); } // Now match the next item in the lazy loop. We need to reset the pos to the position // just after the last character in this loop was matched, and we need to store the resulting position // for the next time we backtrack. 
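            // (Concretely, for something like "a*?b": if 'b' fails to match, control jumps back here, we rewind
            // to startingPos, try to consume one more 'a', and then fall through to retry 'b' from the new
            // position; this is an illustrative description, not additional emitted code.)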
// pos = startingPos; // Match single char; Ldloc(startingPos); Stloc(pos); SliceInputSpan(); EmitSingleChar(node); TransferSliceStaticPosToPos(); // Now that we've appropriately advanced by one character and are set for what comes after the loop, // see if we can skip ahead more iterations by doing a search for a following literal. if (iterationCount is null && node.Kind is RegexNodeKind.Notonelazy && !IsCaseInsensitive(node) && subsequent?.FindStartingLiteral(4) is ValueTuple<char, string?, string?> literal && // 5 == max optimized by IndexOfAny, and we need to reserve 1 for node.Ch (literal.Item3 is not null ? !literal.Item3.Contains(node.Ch) : (literal.Item2?[0] ?? literal.Item1) != node.Ch)) // no overlap between node.Ch and the start of the literal { // e.g. "<[^>]*?>" // This lazy loop will consume all characters other than node.Ch until the subsequent literal. // We can implement it to search for either that char or the literal, whichever comes first. // If it ends up being that node.Ch, the loop fails (we're only here if we're backtracking). // startingPos = slice.IndexOfAny(node.Ch, literal); Ldloc(slice); if (literal.Item3 is not null) { switch (literal.Item3.Length) { case 2: Ldc(node.Ch); Ldc(literal.Item3[0]); Ldc(literal.Item3[1]); Call(s_spanIndexOfAnyCharCharChar); break; default: Ldstr(node.Ch + literal.Item3); Call(s_stringAsSpanMethod); Call(s_spanIndexOfAnySpan); break; } } else { Ldc(node.Ch); Ldc(literal.Item2?[0] ?? literal.Item1); Call(s_spanIndexOfAnyCharChar); } Stloc(startingPos); // if ((uint)startingPos >= (uint)slice.Length) goto doneLabel; Ldloc(startingPos); Ldloca(slice); Call(s_spanGetLengthMethod); BgeUnFar(doneLabel); // if (slice[startingPos] == node.Ch) goto doneLabel; Ldloca(slice); Ldloc(startingPos); Call(s_spanGetItemMethod); LdindU2(); Ldc(node.Ch); BeqFar(doneLabel); // pos += startingPos; // slice = inputSpace.Slice(pos); Ldloc(pos); Ldloc(startingPos); Add(); Stloc(pos); SliceInputSpan(); } else if (iterationCount is null && node.Kind is RegexNodeKind.Setlazy && node.Str == RegexCharClass.AnyClass && subsequent?.FindStartingLiteral() is ValueTuple<char, string?, string?> literal2) { // e.g. ".*?string" with RegexOptions.Singleline // This lazy loop will consume all characters until the subsequent literal. If the subsequent literal // isn't found, the loop fails. We can implement it to just search for that literal. // startingPos = slice.IndexOf(literal); Ldloc(slice); if (literal2.Item2 is not null) { Ldstr(literal2.Item2); Call(s_stringAsSpanMethod); Call(s_spanIndexOfSpan); } else if (literal2.Item3 is not null) { switch (literal2.Item3.Length) { case 2: Ldc(literal2.Item3[0]); Ldc(literal2.Item3[1]); Call(s_spanIndexOfAnyCharChar); break; case 3: Ldc(literal2.Item3[0]); Ldc(literal2.Item3[1]); Ldc(literal2.Item3[2]); Call(s_spanIndexOfAnyCharCharChar); break; default: Ldstr(literal2.Item3); Call(s_stringAsSpanMethod); Call(s_spanIndexOfAnySpan); break; } } else { Ldc(literal2.Item1); Call(s_spanIndexOfChar); } Stloc(startingPos); // if (startingPos < 0) goto doneLabel; Ldloc(startingPos); Ldc(0); BltFar(doneLabel); // pos += startingPos; // slice = inputSpace.Slice(pos); Ldloc(pos); Ldloc(startingPos); Add(); Stloc(pos); SliceInputSpan(); } // Store the position we've left off at in case we need to iterate again. // startingPos = pos; Ldloc(pos); Stloc(startingPos); // Update the done label for everything that comes after this node. 
This is done after we emit the single char // matching, as that failing indicates the loop itself has failed to match. Label originalDoneLabel = doneLabel; doneLabel = backtrackingLabel; // leave set to the backtracking label for all subsequent nodes MarkLabel(endLoopLabel); if (capturepos is not null) { // capturepos = base.CrawlPos(); Ldthis(); Call(s_crawlposMethod); Stloc(capturepos); } if (node.IsInLoop()) { // Store the loop's state // base.runstack[stackpos++] = startingPos; // base.runstack[stackpos++] = capturepos; // base.runstack[stackpos++] = iterationCount; EmitStackResizeIfNeeded(3); EmitStackPush(() => Ldloc(startingPos)); if (capturepos is not null) { EmitStackPush(() => Ldloc(capturepos)); } if (iterationCount is not null) { EmitStackPush(() => Ldloc(iterationCount)); } // Skip past the backtracking section Label backtrackingEnd = DefineLabel(); BrFar(backtrackingEnd); // Emit a backtracking section that restores the loop's state and then jumps to the previous done label Label backtrack = DefineLabel(); MarkLabel(backtrack); // iterationCount = base.runstack[--stackpos]; // capturepos = base.runstack[--stackpos]; // startingPos = base.runstack[--stackpos]; if (iterationCount is not null) { EmitStackPop(); Stloc(iterationCount); } if (capturepos is not null) { EmitStackPop(); Stloc(capturepos); } EmitStackPop(); Stloc(startingPos); // goto doneLabel; BrFar(doneLabel); doneLabel = backtrack; MarkLabel(backtrackingEnd); } } void EmitLazy(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Lazyloop, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M < int.MaxValue, $"Unexpected M={node.M}"); Debug.Assert(node.N >= node.M, $"Unexpected M={node.M}, N={node.N}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); int minIterations = node.M; int maxIterations = node.N; Label originalDoneLabel = doneLabel; bool isAtomic = analysis.IsAtomicByAncestor(node); // If this is actually an atomic lazy loop, we need to output just the minimum number of iterations, // as nothing will backtrack into the lazy loop to get it progress further. if (isAtomic) { switch (minIterations) { case 0: // Atomic lazy with a min count of 0: nop. return; case 1: // Atomic lazy with a min count of 1: just output the child, no looping required. EmitNode(node.Child(0)); return; } } // If this is actually a repeater and the child doesn't have any backtracking in it that might // cause us to need to unwind already taken iterations, just output it as a repeater loop. if (minIterations == maxIterations && !analysis.MayBacktrack(node.Child(0))) { EmitNonBacktrackingRepeater(node); return; } // We might loop any number of times. In order to ensure this loop and subsequent code sees sliceStaticPos // the same regardless, we always need it to contain the same value, and the easiest such value is 0. // So, we transfer sliceStaticPos to pos, and ensure that any path out of here has sliceStaticPos as 0. TransferSliceStaticPosToPos(); LocalBuilder startingPos = DeclareInt32(); LocalBuilder iterationCount = DeclareInt32(); LocalBuilder sawEmpty = DeclareInt32(); Label body = DefineLabel(); Label endLoop = DefineLabel(); // iterationCount = 0; // startingPos = pos; // sawEmpty = 0; // false Ldc(0); Stloc(iterationCount); Ldloc(pos); Stloc(startingPos); Ldc(0); Stloc(sawEmpty); // If the min count is 0, start out by jumping right to what's after the loop. Backtracking // will then bring us back in to do further iterations. 
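            // (For example, with "(?:ab)*?c" nothing is consumed by the loop up front; only when 'c' fails to
            // match does backtracking bring us back to run one more "ab" iteration before retrying 'c'.)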
if (minIterations == 0) { // goto endLoop; BrFar(endLoop); } // Iteration body MarkLabel(body); EmitTimeoutCheck(); // We need to store the starting pos and crawl position so that it may // be backtracked through later. This needs to be the starting position from // the iteration we're leaving, so it's pushed before updating it to pos. // base.runstack[stackpos++] = base.Crawlpos(); // base.runstack[stackpos++] = startingPos; // base.runstack[stackpos++] = pos; // base.runstack[stackpos++] = sawEmpty; EmitStackResizeIfNeeded(3); if (expressionHasCaptures) { EmitStackPush(() => { Ldthis(); Call(s_crawlposMethod); }); } EmitStackPush(() => Ldloc(startingPos)); EmitStackPush(() => Ldloc(pos)); EmitStackPush(() => Ldloc(sawEmpty)); // Save off some state. We need to store the current pos so we can compare it against // pos after the iteration, in order to determine whether the iteration was empty. Empty // iterations are allowed as part of min matches, but once we've met the min quote, empty matches // are considered match failures. // startingPos = pos; Ldloc(pos); Stloc(startingPos); // Proactively increase the number of iterations. We do this prior to the match rather than once // we know it's successful, because we need to decrement it as part of a failed match when // backtracking; it's thus simpler to just always decrement it as part of a failed match, even // when initially greedily matching the loop, which then requires we increment it before trying. // iterationCount++; Ldloc(iterationCount); Ldc(1); Add(); Stloc(iterationCount); // Last but not least, we need to set the doneLabel that a failed match of the body will jump to. // Such an iteration match failure may or may not fail the whole operation, depending on whether // we've already matched the minimum required iterations, so we need to jump to a location that // will make that determination. Label iterationFailedLabel = DefineLabel(); doneLabel = iterationFailedLabel; // Finally, emit the child. Debug.Assert(sliceStaticPos == 0); EmitNode(node.Child(0)); TransferSliceStaticPosToPos(); // ensure sliceStaticPos remains 0 if (doneLabel == iterationFailedLabel) { doneLabel = originalDoneLabel; } // Loop condition. Continue iterating if we've not yet reached the minimum. if (minIterations > 0) { // if (iterationCount < minIterations) goto body; Ldloc(iterationCount); Ldc(minIterations); BltFar(body); } // If the last iteration was empty, we need to prevent further iteration from this point // unless we backtrack out of this iteration. We can do that easily just by pretending // we reached the max iteration count. // if (pos == startingPos) sawEmpty = 1; // true Label skipSawEmptySet = DefineLabel(); Ldloc(pos); Ldloc(startingPos); Bne(skipSawEmptySet); Ldc(1); Stloc(sawEmpty); MarkLabel(skipSawEmptySet); // We matched the next iteration. Jump to the subsequent code. // goto endLoop; BrFar(endLoop); // Now handle what happens when an iteration fails. We need to reset state to what it was before just that iteration // started. That includes resetting pos and clearing out any captures from that iteration. 
MarkLabel(iterationFailedLabel); // iterationCount--; Ldloc(iterationCount); Ldc(1); Sub(); Stloc(iterationCount); // if (iterationCount < 0) goto originalDoneLabel; Ldloc(iterationCount); Ldc(0); BltFar(originalDoneLabel); // sawEmpty = base.runstack[--stackpos]; // pos = base.runstack[--stackpos]; // startingPos = base.runstack[--stackpos]; // capturepos = base.runstack[--stackpos]; // while (base.Crawlpos() > capturepos) base.Uncapture(); EmitStackPop(); Stloc(sawEmpty); EmitStackPop(); Stloc(pos); EmitStackPop(); Stloc(startingPos); if (expressionHasCaptures) { using RentedLocalBuilder poppedCrawlPos = RentInt32Local(); EmitStackPop(); Stloc(poppedCrawlPos); EmitUncaptureUntil(poppedCrawlPos); } SliceInputSpan(); if (doneLabel == originalDoneLabel) { // goto originalDoneLabel; BrFar(originalDoneLabel); } else { // if (iterationCount == 0) goto originalDoneLabel; // goto doneLabel; Ldloc(iterationCount); Ldc(0); BeqFar(originalDoneLabel); BrFar(doneLabel); } MarkLabel(endLoop); if (!isAtomic) { // Store the capture's state and skip the backtracking section EmitStackResizeIfNeeded(3); EmitStackPush(() => Ldloc(startingPos)); EmitStackPush(() => Ldloc(iterationCount)); EmitStackPush(() => Ldloc(sawEmpty)); Label skipBacktrack = DefineLabel(); BrFar(skipBacktrack); // Emit a backtracking section that restores the capture's state and then jumps to the previous done label Label backtrack = DefineLabel(); MarkLabel(backtrack); // sawEmpty = base.runstack[--stackpos]; // iterationCount = base.runstack[--stackpos]; // startingPos = base.runstack[--stackpos]; EmitStackPop(); Stloc(sawEmpty); EmitStackPop(); Stloc(iterationCount); EmitStackPop(); Stloc(startingPos); if (maxIterations == int.MaxValue) { // if (sawEmpty != 0) goto doneLabel; Ldloc(sawEmpty); Ldc(0); BneFar(doneLabel); } else { // if (iterationCount >= maxIterations || sawEmpty != 0) goto doneLabel; Ldloc(iterationCount); Ldc(maxIterations); BgeFar(doneLabel); Ldloc(sawEmpty); Ldc(0); BneFar(doneLabel); } // goto body; BrFar(body); doneLabel = backtrack; MarkLabel(skipBacktrack); } } // Emits the code to handle a loop (repeater) with a fixed number of iterations. // RegexNode.M is used for the number of iterations (RegexNode.N is ignored), as this // might be used to implement the required iterations of other kinds of loops. void EmitSingleCharRepeater(RegexNode node, bool emitLengthChecksIfRequired = true) { Debug.Assert(node.IsOneFamily || node.IsNotoneFamily || node.IsSetFamily, $"Unexpected type: {node.Kind}"); int iterations = node.M; switch (iterations) { case 0: // No iterations, nothing to do. return; case 1: // Just match the individual item EmitSingleChar(node, emitLengthChecksIfRequired); return; case <= RegexNode.MultiVsRepeaterLimit when node.IsOneFamily && !IsCaseInsensitive(node): // This is a repeated case-sensitive character; emit it as a multi in order to get all the optimizations // afforded to a multi, e.g. unrolling the loop with multi-char reads/comparisons at a time. EmitMultiCharString(new string(node.Ch, iterations), caseInsensitive: false, emitLengthChecksIfRequired); return; } // if ((uint)(sliceStaticPos + iterations - 1) >= (uint)slice.Length) goto doneLabel; if (emitLengthChecksIfRequired) { EmitSpanLengthCheck(iterations); } // Arbitrary limit for unrolling vs creating a loop. We want to balance size in the generated // code with other costs, like the (small) overhead of slicing to create the temp span to iterate. 
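            // (As a sketch: a repeater such as "[ab]{4}" is unrolled into four consecutive single-character
            // checks, whereas "[ab]{64}" instead uses the counted loop below over a temporary slice.)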
const int MaxUnrollSize = 16; if (iterations <= MaxUnrollSize) { // if (slice[sliceStaticPos] != c1 || // slice[sliceStaticPos + 1] != c2 || // ...) // goto doneLabel; for (int i = 0; i < iterations; i++) { EmitSingleChar(node, emitLengthCheck: false); } } else { // ReadOnlySpan<char> tmp = slice.Slice(sliceStaticPos, iterations); // for (int i = 0; i < tmp.Length; i++) // { // TimeoutCheck(); // if (tmp[i] != ch) goto Done; // } // sliceStaticPos += iterations; Label conditionLabel = DefineLabel(); Label bodyLabel = DefineLabel(); using RentedLocalBuilder spanLocal = RentReadOnlySpanCharLocal(); Ldloca(slice); Ldc(sliceStaticPos); Ldc(iterations); Call(s_spanSliceIntIntMethod); Stloc(spanLocal); using RentedLocalBuilder iterationLocal = RentInt32Local(); Ldc(0); Stloc(iterationLocal); BrFar(conditionLabel); MarkLabel(bodyLabel); EmitTimeoutCheck(); LocalBuilder tmpTextSpanLocal = slice; // we want EmitSingleChar to refer to this temporary int tmpTextSpanPos = sliceStaticPos; slice = spanLocal; sliceStaticPos = 0; EmitSingleChar(node, emitLengthCheck: false, offset: iterationLocal); slice = tmpTextSpanLocal; sliceStaticPos = tmpTextSpanPos; Ldloc(iterationLocal); Ldc(1); Add(); Stloc(iterationLocal); MarkLabel(conditionLabel); Ldloc(iterationLocal); Ldloca(spanLocal); Call(s_spanGetLengthMethod); BltFar(bodyLabel); sliceStaticPos += iterations; } } // Emits the code to handle a non-backtracking, variable-length loop around a single character comparison. void EmitSingleCharAtomicLoop(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic, $"Unexpected type: {node.Kind}"); // If this is actually a repeater, emit that instead. if (node.M == node.N) { EmitSingleCharRepeater(node); return; } // If this is actually an optional single char, emit that instead. if (node.M == 0 && node.N == 1) { EmitAtomicSingleCharZeroOrOne(node); return; } Debug.Assert(node.N > node.M); int minIterations = node.M; int maxIterations = node.N; using RentedLocalBuilder iterationLocal = RentInt32Local(); Label atomicLoopDoneLabel = DefineLabel(); Span<char> setChars = stackalloc char[5]; // max optimized by IndexOfAny today int numSetChars = 0; if (node.IsNotoneFamily && maxIterations == int.MaxValue && (!IsCaseInsensitive(node))) { // For Notone, we're looking for a specific character, as everything until we find // it is consumed by the loop. If we're unbounded, such as with ".*" and if we're case-sensitive, // we can use the vectorized IndexOf to do the search, rather than open-coding it. The unbounded // restriction is purely for simplicity; it could be removed in the future with additional code to // handle the unbounded case. 
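                // (For example, the "[^>]*" in "<[^>]*>" is compiled, roughly, to:
                //     int i = slice.Slice(sliceStaticPos).IndexOf('>');
                //     if (i < 0) i = slice.Length - sliceStaticPos;   // '>' not found: consume the rest of the input
                //  which is what the IL below emits via the vectorized span IndexOf.)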
// int i = slice.Slice(sliceStaticPos).IndexOf(char); if (sliceStaticPos > 0) { Ldloca(slice); Ldc(sliceStaticPos); Call(s_spanSliceIntMethod); } else { Ldloc(slice); } Ldc(node.Ch); Call(s_spanIndexOfChar); Stloc(iterationLocal); // if (i >= 0) goto atomicLoopDoneLabel; Ldloc(iterationLocal); Ldc(0); BgeFar(atomicLoopDoneLabel); // i = slice.Length - sliceStaticPos; Ldloca(slice); Call(s_spanGetLengthMethod); if (sliceStaticPos > 0) { Ldc(sliceStaticPos); Sub(); } Stloc(iterationLocal); } else if (node.IsSetFamily && maxIterations == int.MaxValue && !IsCaseInsensitive(node) && (numSetChars = RegexCharClass.GetSetChars(node.Str!, setChars)) != 0 && RegexCharClass.IsNegated(node.Str!)) { // If the set is negated and contains only a few characters (if it contained 1 and was negated, it would // have been reduced to a Notone), we can use an IndexOfAny to find any of the target characters. // As with the notoneloopatomic above, the unbounded constraint is purely for simplicity. Debug.Assert(numSetChars > 1); // int i = slice.Slice(sliceStaticPos).IndexOfAny(ch1, ch2, ...); if (sliceStaticPos > 0) { Ldloca(slice); Ldc(sliceStaticPos); Call(s_spanSliceIntMethod); } else { Ldloc(slice); } switch (numSetChars) { case 2: Ldc(setChars[0]); Ldc(setChars[1]); Call(s_spanIndexOfAnyCharChar); break; case 3: Ldc(setChars[0]); Ldc(setChars[1]); Ldc(setChars[2]); Call(s_spanIndexOfAnyCharCharChar); break; default: Ldstr(setChars.Slice(0, numSetChars).ToString()); Call(s_stringAsSpanMethod); Call(s_spanIndexOfSpan); break; } Stloc(iterationLocal); // if (i >= 0) goto atomicLoopDoneLabel; Ldloc(iterationLocal); Ldc(0); BgeFar(atomicLoopDoneLabel); // i = slice.Length - sliceStaticPos; Ldloca(slice); Call(s_spanGetLengthMethod); if (sliceStaticPos > 0) { Ldc(sliceStaticPos); Sub(); } Stloc(iterationLocal); } else if (node.IsSetFamily && maxIterations == int.MaxValue && node.Str == RegexCharClass.AnyClass) { // .* was used with RegexOptions.Singleline, which means it'll consume everything. Just jump to the end. // The unbounded constraint is the same as in the Notone case above, done purely for simplicity. // int i = inputSpan.Length - pos; TransferSliceStaticPosToPos(); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldloc(pos); Sub(); Stloc(iterationLocal); } else { // For everything else, do a normal loop. // Transfer sliceStaticPos to pos to help with bounds check elimination on the loop. 
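                // (Sketch of the fallback, assuming a bounded set loop such as "[0-9a-f]{1,8}": the emitted
                // code behaves roughly like
                //     int i = 0;
                //     while (i < 8 && (uint)i < (uint)slice.Length && <slice[i] matches the set>) i++;
                // followed by the shared min-iterations check and pos/slice advance below.)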
TransferSliceStaticPosToPos(); Label conditionLabel = DefineLabel(); Label bodyLabel = DefineLabel(); // int i = 0; Ldc(0); Stloc(iterationLocal); BrFar(conditionLabel); // Body: // TimeoutCheck(); MarkLabel(bodyLabel); EmitTimeoutCheck(); // if ((uint)i >= (uint)slice.Length) goto atomicLoopDoneLabel; Ldloc(iterationLocal); Ldloca(slice); Call(s_spanGetLengthMethod); BgeUnFar(atomicLoopDoneLabel); // if (slice[i] != ch) goto atomicLoopDoneLabel; Ldloca(slice); Ldloc(iterationLocal); Call(s_spanGetItemMethod); LdindU2(); if (node.IsSetFamily) { EmitMatchCharacterClass(node.Str!, IsCaseInsensitive(node)); BrfalseFar(atomicLoopDoneLabel); } else { if (IsCaseInsensitive(node)) { CallToLower(); } Ldc(node.Ch); if (node.IsOneFamily) { BneFar(atomicLoopDoneLabel); } else // IsNotoneFamily { BeqFar(atomicLoopDoneLabel); } } // i++; Ldloc(iterationLocal); Ldc(1); Add(); Stloc(iterationLocal); // if (i >= maxIterations) goto atomicLoopDoneLabel; MarkLabel(conditionLabel); if (maxIterations != int.MaxValue) { Ldloc(iterationLocal); Ldc(maxIterations); BltFar(bodyLabel); } else { BrFar(bodyLabel); } } // Done: MarkLabel(atomicLoopDoneLabel); // Check to ensure we've found at least min iterations. if (minIterations > 0) { Ldloc(iterationLocal); Ldc(minIterations); BltFar(doneLabel); } // Now that we've completed our optional iterations, advance the text span // and pos by the number of iterations completed. // slice = slice.Slice(i); Ldloca(slice); Ldloc(iterationLocal); Call(s_spanSliceIntMethod); Stloc(slice); // pos += i; Ldloc(pos); Ldloc(iterationLocal); Add(); Stloc(pos); } // Emits the code to handle a non-backtracking optional zero-or-one loop. void EmitAtomicSingleCharZeroOrOne(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M == 0 && node.N == 1); Label skipUpdatesLabel = DefineLabel(); // if ((uint)sliceStaticPos >= (uint)slice.Length) goto skipUpdatesLabel; Ldc(sliceStaticPos); Ldloca(slice); Call(s_spanGetLengthMethod); BgeUnFar(skipUpdatesLabel); // if (slice[sliceStaticPos] != ch) goto skipUpdatesLabel; Ldloca(slice); Ldc(sliceStaticPos); Call(s_spanGetItemMethod); LdindU2(); if (node.IsSetFamily) { EmitMatchCharacterClass(node.Str!, IsCaseInsensitive(node)); BrfalseFar(skipUpdatesLabel); } else { if (IsCaseInsensitive(node)) { CallToLower(); } Ldc(node.Ch); if (node.IsOneFamily) { BneFar(skipUpdatesLabel); } else // IsNotoneFamily { BeqFar(skipUpdatesLabel); } } // slice = slice.Slice(1); Ldloca(slice); Ldc(1); Call(s_spanSliceIntMethod); Stloc(slice); // pos++; Ldloc(pos); Ldc(1); Add(); Stloc(pos); MarkLabel(skipUpdatesLabel); } void EmitNonBacktrackingRepeater(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M < int.MaxValue, $"Unexpected M={node.M}"); Debug.Assert(node.M == node.N, $"Unexpected M={node.M} == N={node.N}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); Debug.Assert(!analysis.MayBacktrack(node.Child(0)), $"Expected non-backtracking node {node.Kind}"); // Ensure every iteration of the loop sees a consistent value. TransferSliceStaticPosToPos(); // Loop M==N times to match the child exactly that numbers of times. Label condition = DefineLabel(); Label body = DefineLabel(); // for (int i = 0; ...) 
using RentedLocalBuilder i = RentInt32Local(); Ldc(0); Stloc(i); BrFar(condition); MarkLabel(body); EmitNode(node.Child(0)); TransferSliceStaticPosToPos(); // make sure static the static position remains at 0 for subsequent constructs // for (...; ...; i++) Ldloc(i); Ldc(1); Add(); Stloc(i); // for (...; i < node.M; ...) MarkLabel(condition); Ldloc(i); Ldc(node.M); BltFar(body); } void EmitLoop(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M < int.MaxValue, $"Unexpected M={node.M}"); Debug.Assert(node.N >= node.M, $"Unexpected M={node.M}, N={node.N}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); int minIterations = node.M; int maxIterations = node.N; bool isAtomic = analysis.IsAtomicByAncestor(node); // If this is actually a repeater and the child doesn't have any backtracking in it that might // cause us to need to unwind already taken iterations, just output it as a repeater loop. if (minIterations == maxIterations && !analysis.MayBacktrack(node.Child(0))) { EmitNonBacktrackingRepeater(node); return; } // We might loop any number of times. In order to ensure this loop and subsequent code sees sliceStaticPos // the same regardless, we always need it to contain the same value, and the easiest such value is 0. // So, we transfer sliceStaticPos to pos, and ensure that any path out of here has sliceStaticPos as 0. TransferSliceStaticPosToPos(); Label originalDoneLabel = doneLabel; LocalBuilder startingPos = DeclareInt32(); LocalBuilder iterationCount = DeclareInt32(); Label body = DefineLabel(); Label endLoop = DefineLabel(); // iterationCount = 0; // startingPos = 0; Ldc(0); Stloc(iterationCount); Ldc(0); Stloc(startingPos); // Iteration body MarkLabel(body); EmitTimeoutCheck(); // We need to store the starting pos and crawl position so that it may // be backtracked through later. This needs to be the starting position from // the iteration we're leaving, so it's pushed before updating it to pos. EmitStackResizeIfNeeded(3); if (expressionHasCaptures) { // base.runstack[stackpos++] = base.Crawlpos(); EmitStackPush(() => { Ldthis(); Call(s_crawlposMethod); }); } EmitStackPush(() => Ldloc(startingPos)); EmitStackPush(() => Ldloc(pos)); // Save off some state. We need to store the current pos so we can compare it against // pos after the iteration, in order to determine whether the iteration was empty. Empty // iterations are allowed as part of min matches, but once we've met the min quote, empty matches // are considered match failures. // startingPos = pos; Ldloc(pos); Stloc(startingPos); // Proactively increase the number of iterations. We do this prior to the match rather than once // we know it's successful, because we need to decrement it as part of a failed match when // backtracking; it's thus simpler to just always decrement it as part of a failed match, even // when initially greedily matching the loop, which then requires we increment it before trying. // iterationCount++; Ldloc(iterationCount); Ldc(1); Add(); Stloc(iterationCount); // Last but not least, we need to set the doneLabel that a failed match of the body will jump to. // Such an iteration match failure may or may not fail the whole operation, depending on whether // we've already matched the minimum required iterations, so we need to jump to a location that // will make that determination. 
Label iterationFailedLabel = DefineLabel(); doneLabel = iterationFailedLabel; // Finally, emit the child. Debug.Assert(sliceStaticPos == 0); EmitNode(node.Child(0)); TransferSliceStaticPosToPos(); // ensure sliceStaticPos remains 0 bool childBacktracks = doneLabel != iterationFailedLabel; // Loop condition. Continue iterating greedily if we've not yet reached the maximum. We also need to stop // iterating if the iteration matched empty and we already hit the minimum number of iterations. Otherwise, // we've matched as many iterations as we can with this configuration. Jump to what comes after the loop. switch ((minIterations > 0, maxIterations == int.MaxValue)) { case (true, true): // if (pos != startingPos || iterationCount < minIterations) goto body; // goto endLoop; Ldloc(pos); Ldloc(startingPos); BneFar(body); Ldloc(iterationCount); Ldc(minIterations); BltFar(body); BrFar(endLoop); break; case (true, false): // if ((pos != startingPos || iterationCount < minIterations) && iterationCount < maxIterations) goto body; // goto endLoop; Ldloc(iterationCount); Ldc(maxIterations); BgeFar(endLoop); Ldloc(pos); Ldloc(startingPos); BneFar(body); Ldloc(iterationCount); Ldc(minIterations); BltFar(body); BrFar(endLoop); break; case (false, true): // if (pos != startingPos) goto body; // goto endLoop; Ldloc(pos); Ldloc(startingPos); BneFar(body); BrFar(endLoop); break; case (false, false): // if (pos == startingPos || iterationCount >= maxIterations) goto endLoop; // goto body; Ldloc(pos); Ldloc(startingPos); BeqFar(endLoop); Ldloc(iterationCount); Ldc(maxIterations); BgeFar(endLoop); BrFar(body); break; } // Now handle what happens when an iteration fails, which could be an initial failure or it // could be while backtracking. We need to reset state to what it was before just that iteration // started. That includes resetting pos and clearing out any captures from that iteration. MarkLabel(iterationFailedLabel); // iterationCount--; Ldloc(iterationCount); Ldc(1); Sub(); Stloc(iterationCount); // if (iterationCount < 0) goto originalDoneLabel; Ldloc(iterationCount); Ldc(0); BltFar(originalDoneLabel); // pos = base.runstack[--stackpos]; // startingPos = base.runstack[--stackpos]; EmitStackPop(); Stloc(pos); EmitStackPop(); Stloc(startingPos); if (expressionHasCaptures) { // int poppedCrawlPos = base.runstack[--stackpos]; // while (base.Crawlpos() > poppedCrawlPos) base.Uncapture(); using RentedLocalBuilder poppedCrawlPos = RentInt32Local(); EmitStackPop(); Stloc(poppedCrawlPos); EmitUncaptureUntil(poppedCrawlPos); } SliceInputSpan(); if (minIterations > 0) { // if (iterationCount == 0) goto originalDoneLabel; Ldloc(iterationCount); Ldc(0); BeqFar(originalDoneLabel); // if (iterationCount < minIterations) goto doneLabel/originalDoneLabel; Ldloc(iterationCount); Ldc(minIterations); BltFar(childBacktracks ? 
doneLabel : originalDoneLabel); } if (isAtomic) { doneLabel = originalDoneLabel; MarkLabel(endLoop); } else { if (childBacktracks) { // goto endLoop; BrFar(endLoop); // Backtrack: Label backtrack = DefineLabel(); MarkLabel(backtrack); // if (iterationCount == 0) goto originalDoneLabel; Ldloc(iterationCount); Ldc(0); BeqFar(originalDoneLabel); // goto doneLabel; BrFar(doneLabel); doneLabel = backtrack; } MarkLabel(endLoop); if (node.IsInLoop()) { // Store the loop's state EmitStackResizeIfNeeded(3); EmitStackPush(() => Ldloc(startingPos)); EmitStackPush(() => Ldloc(iterationCount)); // Skip past the backtracking section // goto backtrackingEnd; Label backtrackingEnd = DefineLabel(); BrFar(backtrackingEnd); // Emit a backtracking section that restores the loop's state and then jumps to the previous done label Label backtrack = DefineLabel(); MarkLabel(backtrack); // iterationCount = base.runstack[--runstack]; // startingPos = base.runstack[--runstack]; EmitStackPop(); Stloc(iterationCount); EmitStackPop(); Stloc(startingPos); // goto doneLabel; BrFar(doneLabel); doneLabel = backtrack; MarkLabel(backtrackingEnd); } } } void EmitStackResizeIfNeeded(int count) { Debug.Assert(count >= 1); // if (stackpos >= base.runstack!.Length - (count - 1)) // { // Array.Resize(ref base.runstack, base.runstack.Length * 2); // } Label skipResize = DefineLabel(); Ldloc(stackpos); Ldthisfld(s_runstackField); Ldlen(); if (count > 1) { Ldc(count - 1); Sub(); } Blt(skipResize); Ldthis(); _ilg!.Emit(OpCodes.Ldflda, s_runstackField); Ldthisfld(s_runstackField); Ldlen(); Ldc(2); Mul(); Call(s_arrayResize); MarkLabel(skipResize); } void EmitStackPush(Action load) { // base.runstack[stackpos] = load(); Ldthisfld(s_runstackField); Ldloc(stackpos); load(); StelemI4(); // stackpos++; Ldloc(stackpos); Ldc(1); Add(); Stloc(stackpos); } void EmitStackPop() { // ... = base.runstack[--stackpos]; Ldthisfld(s_runstackField); Ldloc(stackpos); Ldc(1); Sub(); Stloc(stackpos); Ldloc(stackpos); LdelemI4(); } } protected void EmitScan(DynamicMethod tryFindNextStartingPositionMethod, DynamicMethod tryMatchAtCurrentPositionMethod) { Label returnLabel = DefineLabel(); // while (TryFindNextPossibleStartingPosition(text)) Label whileLoopBody = DefineLabel(); MarkLabel(whileLoopBody); Ldthis(); Ldarg_1(); Call(tryFindNextStartingPositionMethod); BrfalseFar(returnLabel); if (_hasTimeout) { // CheckTimeout(); Ldthis(); Call(s_checkTimeoutMethod); } // if (TryMatchAtCurrentPosition(text) || runtextpos == text.length) // return; Ldthis(); Ldarg_1(); Call(tryMatchAtCurrentPositionMethod); BrtrueFar(returnLabel); Ldthisfld(s_runtextposField); Ldarga_s(1); Call(s_spanGetLengthMethod); Ceq(); BrtrueFar(returnLabel); // runtextpos += 1 Ldthis(); Ldthisfld(s_runtextposField); Ldc(1); Add(); Stfld(s_runtextposField); // End loop body. 
BrFar(whileLoopBody); // return; MarkLabel(returnLabel); Ret(); } private void InitializeCultureForTryMatchAtCurrentPositionIfNecessary(AnalysisResults analysis) { _textInfo = null; if (analysis.HasIgnoreCase && (_options & RegexOptions.CultureInvariant) == 0) { // cache CultureInfo in local variable which saves excessive thread local storage accesses _textInfo = DeclareTextInfo(); InitLocalCultureInfo(); } } /// <summary>Emits a a check for whether the character is in the specified character class.</summary> /// <remarks>The character to be checked has already been loaded onto the stack.</remarks> private void EmitMatchCharacterClass(string charClass, bool caseInsensitive) { // We need to perform the equivalent of calling RegexRunner.CharInClass(ch, charClass), // but that call is relatively expensive. Before we fall back to it, we try to optimize // some common cases for which we can do much better, such as known character classes // for which we can call a dedicated method, or a fast-path for ASCII using a lookup table. // First, see if the char class is a built-in one for which there's a better function // we can just call directly. Everything in this section must work correctly for both // case-sensitive and case-insensitive modes, regardless of culture. switch (charClass) { case RegexCharClass.AnyClass: // true Pop(); Ldc(1); return; case RegexCharClass.DigitClass: // char.IsDigit(ch) Call(s_charIsDigitMethod); return; case RegexCharClass.NotDigitClass: // !char.IsDigit(ch) Call(s_charIsDigitMethod); Ldc(0); Ceq(); return; case RegexCharClass.SpaceClass: // char.IsWhiteSpace(ch) Call(s_charIsWhiteSpaceMethod); return; case RegexCharClass.NotSpaceClass: // !char.IsWhiteSpace(ch) Call(s_charIsWhiteSpaceMethod); Ldc(0); Ceq(); return; case RegexCharClass.WordClass: // RegexRunner.IsWordChar(ch) Call(s_isWordCharMethod); return; case RegexCharClass.NotWordClass: // !RegexRunner.IsWordChar(ch) Call(s_isWordCharMethod); Ldc(0); Ceq(); return; } // If we're meant to be doing a case-insensitive lookup, and if we're not using the invariant culture, // lowercase the input. If we're using the invariant culture, we may still end up calling ToLower later // on, but we may also be able to avoid it, in particular in the case of our lookup table, where we can // generate the lookup table already factoring in the invariant case sensitivity. There are multiple // special-code paths between here and the lookup table, but we only take those if invariant is false; // if it were true, they'd need to use CallToLower(). bool invariant = false; if (caseInsensitive) { invariant = UseToLowerInvariant; if (!invariant) { CallToLower(); } } // Next, handle simple sets of one range, e.g. [A-Z], [0-9], etc. This includes some built-in classes, like ECMADigitClass. if (!invariant && RegexCharClass.TryGetSingleRange(charClass, out char lowInclusive, out char highInclusive)) { if (lowInclusive == highInclusive) { // ch == charClass[3] Ldc(lowInclusive); Ceq(); } else { // (uint)ch - lowInclusive < highInclusive - lowInclusive + 1 Ldc(lowInclusive); Sub(); Ldc(highInclusive - lowInclusive + 1); CltUn(); } // Negate the answer if the negation flag was set if (RegexCharClass.IsNegated(charClass)) { Ldc(0); Ceq(); } return; } // Next if the character class contains nothing but a single Unicode category, we can calle char.GetUnicodeCategory and // compare against it. 
It has a fast-lookup path for ASCII, so is as good or better than any lookup we'd generate (plus // we get smaller code), and it's what we'd do for the fallback (which we get to avoid generating) as part of CharInClass. if (!invariant && RegexCharClass.TryGetSingleUnicodeCategory(charClass, out UnicodeCategory category, out bool negated)) { // char.GetUnicodeCategory(ch) == category Call(s_charGetUnicodeInfo); Ldc((int)category); Ceq(); if (negated) { Ldc(0); Ceq(); } return; } // All checks after this point require reading the input character multiple times, // so we store it into a temporary local. using RentedLocalBuilder tempLocal = RentInt32Local(); Stloc(tempLocal); // Next, if there's only 2 or 3 chars in the set (fairly common due to the sets we create for prefixes), // it's cheaper and smaller to compare against each than it is to use a lookup table. if (!invariant && !RegexCharClass.IsNegated(charClass)) { Span<char> setChars = stackalloc char[3]; int numChars = RegexCharClass.GetSetChars(charClass, setChars); if (numChars is 2 or 3) { if (RegexCharClass.DifferByOneBit(setChars[0], setChars[1], out int mask)) // special-case common case of an upper and lowercase ASCII letter combination { // ((ch | mask) == setChars[1]) Ldloc(tempLocal); Ldc(mask); Or(); Ldc(setChars[1] | mask); Ceq(); } else { // (ch == setChars[0]) | (ch == setChars[1]) Ldloc(tempLocal); Ldc(setChars[0]); Ceq(); Ldloc(tempLocal); Ldc(setChars[1]); Ceq(); Or(); } // | (ch == setChars[2]) if (numChars == 3) { Ldloc(tempLocal); Ldc(setChars[2]); Ceq(); Or(); } return; } } using RentedLocalBuilder resultLocal = RentInt32Local(); // Analyze the character set more to determine what code to generate. RegexCharClass.CharClassAnalysisResults analysis = RegexCharClass.Analyze(charClass); // Helper method that emits a call to RegexRunner.CharInClass(ch{.ToLowerInvariant()}, charClass) void EmitCharInClass() { Ldloc(tempLocal); if (invariant) { CallToLower(); } Ldstr(charClass); Call(s_charInClassMethod); Stloc(resultLocal); } Label doneLabel = DefineLabel(); Label comparisonLabel = DefineLabel(); if (!invariant) // if we're being asked to do a case insensitive, invariant comparison, use the lookup table { if (analysis.ContainsNoAscii) { // We determined that the character class contains only non-ASCII, // for example if the class were [\p{IsGreek}\p{IsGreekExtended}], which is // the same as [\u0370-\u03FF\u1F00-1FFF]. (In the future, we could possibly // extend the analysis to produce a known lower-bound and compare against // that rather than always using 128 as the pivot point.) // ch >= 128 && RegexRunner.CharInClass(ch, "...") Ldloc(tempLocal); Ldc(128); Blt(comparisonLabel); EmitCharInClass(); Br(doneLabel); MarkLabel(comparisonLabel); Ldc(0); Stloc(resultLocal); MarkLabel(doneLabel); Ldloc(resultLocal); return; } if (analysis.AllAsciiContained) { // We determined that every ASCII character is in the class, for example // if the class were the negated example from case 1 above: // [^\p{IsGreek}\p{IsGreekExtended}]. // ch < 128 || RegexRunner.CharInClass(ch, "...") Ldloc(tempLocal); Ldc(128); Blt(comparisonLabel); EmitCharInClass(); Br(doneLabel); MarkLabel(comparisonLabel); Ldc(1); Stloc(resultLocal); MarkLabel(doneLabel); Ldloc(resultLocal); return; } } // Now, our big hammer is to generate a lookup table that lets us quickly index by character into a yes/no // answer as to whether the character is in the target character class. 
However, we don't want to store // a lookup table for every possible character for every character class in the regular expression; at one // bit for each of 65K characters, that would be an 8K bitmap per character class. Instead, we handle the // common case of ASCII input via such a lookup table, which at one bit for each of 128 characters is only // 16 bytes per character class. We of course still need to be able to handle inputs that aren't ASCII, so // we check the input against 128, and have a fallback if the input is >= to it. Determining the right // fallback could itself be expensive. For example, if it's possible that a value >= 128 could match the // character class, we output a call to RegexRunner.CharInClass, but we don't want to have to enumerate the // entire character class evaluating every character against it, just to determine whether it's a match. // Instead, we employ some quick heuristics that will always ensure we provide a correct answer even if // we could have sometimes generated better code to give that answer. // Generate the lookup table to store 128 answers as bits. We use a const string instead of a byte[] / static // data property because it lets IL emit handle all the details for us. string bitVectorString = string.Create(8, (charClass, invariant), static (dest, state) => // String length is 8 chars == 16 bytes == 128 bits. { for (int i = 0; i < 128; i++) { char c = (char)i; bool isSet = state.invariant ? RegexCharClass.CharInClass(char.ToLowerInvariant(c), state.charClass) : RegexCharClass.CharInClass(c, state.charClass); if (isSet) { dest[i >> 4] |= (char)(1 << (i & 0xF)); } } }); // We determined that the character class may contain ASCII, so we // output the lookup against the lookup table. // ch < 128 ? (bitVectorString[ch >> 4] & (1 << (ch & 0xF))) != 0 : Ldloc(tempLocal); Ldc(128); Bge(comparisonLabel); Ldstr(bitVectorString); Ldloc(tempLocal); Ldc(4); Shr(); Call(s_stringGetCharsMethod); Ldc(1); Ldloc(tempLocal); Ldc(15); And(); Ldc(31); And(); Shl(); And(); Ldc(0); CgtUn(); Stloc(resultLocal); Br(doneLabel); MarkLabel(comparisonLabel); if (analysis.ContainsOnlyAscii) { // We know that all inputs that could match are ASCII, for example if the // character class were [A-Za-z0-9], so since the ch is now known to be >= 128, we // can just fail the comparison. Ldc(0); Stloc(resultLocal); } else if (analysis.AllNonAsciiContained) { // We know that all non-ASCII inputs match, for example if the character // class were [^\r\n], so since we just determined the ch to be >= 128, we can just // give back success. Ldc(1); Stloc(resultLocal); } else { // We know that the whole class wasn't ASCII, and we don't know anything about the non-ASCII // characters other than that some might be included, for example if the character class // were [\w\d], so since ch >= 128, we need to fall back to calling CharInClass. EmitCharInClass(); } MarkLabel(doneLabel); Ldloc(resultLocal); } /// <summary>Emits a timeout check.</summary> private void EmitTimeoutCheck() { if (!_hasTimeout) { return; } Debug.Assert(_loopTimeoutCounter != null); // Increment counter for each loop iteration. Ldloc(_loopTimeoutCounter); Ldc(1); Add(); Stloc(_loopTimeoutCounter); // Emit code to check the timeout every 2048th iteration. Label label = DefineLabel(); Ldloc(_loopTimeoutCounter); Ldc(LoopTimeoutCheckCount); RemUn(); Brtrue(label); Ldthis(); Call(s_checkTimeoutMethod); MarkLabel(label); } } }
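// Usage note (illustrative, not part of the emitted code): this compiler is exercised when a pattern is
// constructed with RegexOptions.Compiled, e.g.
//     var r = new Regex(@"a[bc]+d", RegexOptions.Compiled);
//     bool matched = r.IsMatch("abbcd");
// in which case the DynamicMethods produced above implement the runner's
// TryFindNextPossibleStartingPosition/TryMatchAtCurrentPosition/Scan logic.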
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Globalization;
using System.Reflection;
using System.Reflection.Emit;
using System.Runtime.InteropServices;
using System.Threading;

namespace System.Text.RegularExpressions
{
    /// <summary>
    /// RegexCompiler translates a block of RegexCode to MSIL, and creates a subclass of the RegexRunner type.
    /// </summary>
    internal abstract class RegexCompiler
    {
        private static readonly FieldInfo s_runtextstartField = RegexRunnerField("runtextstart");
        private static readonly FieldInfo s_runtextposField = RegexRunnerField("runtextpos");
        private static readonly FieldInfo s_runstackField = RegexRunnerField("runstack");

        private static readonly MethodInfo s_captureMethod = RegexRunnerMethod("Capture");
        private static readonly MethodInfo s_transferCaptureMethod = RegexRunnerMethod("TransferCapture");
        private static readonly MethodInfo s_uncaptureMethod = RegexRunnerMethod("Uncapture");
        private static readonly MethodInfo s_isMatchedMethod = RegexRunnerMethod("IsMatched");
        private static readonly MethodInfo s_matchLengthMethod = RegexRunnerMethod("MatchLength");
        private static readonly MethodInfo s_matchIndexMethod = RegexRunnerMethod("MatchIndex");
        private static readonly MethodInfo s_isBoundaryMethod = typeof(RegexRunner).GetMethod("IsBoundary", BindingFlags.NonPublic | BindingFlags.Instance, new[] { typeof(ReadOnlySpan<char>), typeof(int) })!;
        private static readonly MethodInfo s_isWordCharMethod = RegexRunnerMethod("IsWordChar");
        private static readonly MethodInfo s_isECMABoundaryMethod = typeof(RegexRunner).GetMethod("IsECMABoundary", BindingFlags.NonPublic | BindingFlags.Instance, new[] { typeof(ReadOnlySpan<char>), typeof(int) })!;
        private static readonly MethodInfo s_crawlposMethod = RegexRunnerMethod("Crawlpos");
        private static readonly MethodInfo s_charInClassMethod = RegexRunnerMethod("CharInClass");
        private static readonly MethodInfo s_checkTimeoutMethod = RegexRunnerMethod("CheckTimeout");
        private static readonly MethodInfo s_charIsDigitMethod = typeof(char).GetMethod("IsDigit", new Type[] { typeof(char) })!;
        private static readonly MethodInfo s_charIsWhiteSpaceMethod = typeof(char).GetMethod("IsWhiteSpace", new Type[] { typeof(char) })!;
        private static readonly MethodInfo s_charGetUnicodeInfo = typeof(char).GetMethod("GetUnicodeCategory", new Type[] { typeof(char) })!;
        private static readonly MethodInfo s_charToLowerInvariantMethod = typeof(char).GetMethod("ToLowerInvariant", new Type[] { typeof(char) })!;
        private static readonly MethodInfo s_cultureInfoGetCurrentCultureMethod = typeof(CultureInfo).GetMethod("get_CurrentCulture")!;
        private static readonly MethodInfo s_cultureInfoGetTextInfoMethod = typeof(CultureInfo).GetMethod("get_TextInfo")!;
        private static readonly MethodInfo s_spanGetItemMethod = typeof(ReadOnlySpan<char>).GetMethod("get_Item", new Type[] { typeof(int) })!;
        private static readonly MethodInfo s_spanGetLengthMethod = typeof(ReadOnlySpan<char>).GetMethod("get_Length")!;
        private static readonly MethodInfo s_memoryMarshalGetReference = typeof(MemoryMarshal).GetMethod("GetReference", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) })!.MakeGenericMethod(typeof(char));
        private static readonly MethodInfo s_spanIndexOfChar = typeof(MemoryExtensions).GetMethod("IndexOf", new Type[] {
typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), Type.MakeGenericMethodParameter(0) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanIndexOfSpan = typeof(MemoryExtensions).GetMethod("IndexOf", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanIndexOfAnyCharChar = typeof(MemoryExtensions).GetMethod("IndexOfAny", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), Type.MakeGenericMethodParameter(0), Type.MakeGenericMethodParameter(0) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanIndexOfAnyCharCharChar = typeof(MemoryExtensions).GetMethod("IndexOfAny", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), Type.MakeGenericMethodParameter(0), Type.MakeGenericMethodParameter(0), Type.MakeGenericMethodParameter(0) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanIndexOfAnySpan = typeof(MemoryExtensions).GetMethod("IndexOfAny", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanLastIndexOfChar = typeof(MemoryExtensions).GetMethod("LastIndexOf", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), Type.MakeGenericMethodParameter(0) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanLastIndexOfAnyCharChar = typeof(MemoryExtensions).GetMethod("LastIndexOfAny", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), Type.MakeGenericMethodParameter(0), Type.MakeGenericMethodParameter(0) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanLastIndexOfAnyCharCharChar = typeof(MemoryExtensions).GetMethod("LastIndexOfAny", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), Type.MakeGenericMethodParameter(0), Type.MakeGenericMethodParameter(0), Type.MakeGenericMethodParameter(0) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanLastIndexOfAnySpan = typeof(MemoryExtensions).GetMethod("LastIndexOfAny", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanLastIndexOfSpan = typeof(MemoryExtensions).GetMethod("LastIndexOf", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) })!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_spanSliceIntMethod = typeof(ReadOnlySpan<char>).GetMethod("Slice", new Type[] { typeof(int) })!; private static readonly MethodInfo s_spanSliceIntIntMethod = typeof(ReadOnlySpan<char>).GetMethod("Slice", new Type[] { typeof(int), typeof(int) })!; private static readonly MethodInfo s_spanStartsWith = typeof(MemoryExtensions).GetMethod("StartsWith", new Type[] { typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(ReadOnlySpan<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) 
})!.MakeGenericMethod(typeof(char)); private static readonly MethodInfo s_stringAsSpanMethod = typeof(MemoryExtensions).GetMethod("AsSpan", new Type[] { typeof(string) })!; private static readonly MethodInfo s_stringGetCharsMethod = typeof(string).GetMethod("get_Chars", new Type[] { typeof(int) })!; private static readonly MethodInfo s_textInfoToLowerMethod = typeof(TextInfo).GetMethod("ToLower", new Type[] { typeof(char) })!; private static readonly MethodInfo s_arrayResize = typeof(Array).GetMethod("Resize")!.MakeGenericMethod(typeof(int)); private static readonly MethodInfo s_mathMinIntInt = typeof(Math).GetMethod("Min", new Type[] { typeof(int), typeof(int) })!; /// <summary>The ILGenerator currently in use.</summary> protected ILGenerator? _ilg; /// <summary>The options for the expression.</summary> protected RegexOptions _options; /// <summary>The <see cref="RegexTree"/> written for the expression.</summary> protected RegexTree? _regexTree; /// <summary>Whether this expression has a non-infinite timeout.</summary> protected bool _hasTimeout; /// <summary>Pool of Int32 LocalBuilders.</summary> private Stack<LocalBuilder>? _int32LocalsPool; /// <summary>Pool of ReadOnlySpan of char locals.</summary> private Stack<LocalBuilder>? _readOnlySpanCharLocalsPool; /// <summary>Local representing a cached TextInfo for the culture to use for all case-insensitive operations.</summary> private LocalBuilder? _textInfo; /// <summary>Local representing a timeout counter for loops (set loops and node loops).</summary> private LocalBuilder? _loopTimeoutCounter; /// <summary>A frequency with which the timeout should be validated.</summary> private const int LoopTimeoutCheckCount = 2048; private static FieldInfo RegexRunnerField(string fieldname) => typeof(RegexRunner).GetField(fieldname, BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance | BindingFlags.Static)!; private static MethodInfo RegexRunnerMethod(string methname) => typeof(RegexRunner).GetMethod(methname, BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance | BindingFlags.Static)!; /// <summary> /// Entry point to dynamically compile a regular expression. The expression is compiled to /// an in-memory assembly. /// </summary> internal static RegexRunnerFactory? 
Compile(string pattern, RegexTree regexTree, RegexOptions options, bool hasTimeout) => new RegexLWCGCompiler().FactoryInstanceFromCode(pattern, regexTree, options, hasTimeout); /// <summary>A macro for _ilg.DefineLabel</summary> private Label DefineLabel() => _ilg!.DefineLabel(); /// <summary>A macro for _ilg.MarkLabel</summary> private void MarkLabel(Label l) => _ilg!.MarkLabel(l); /// <summary>A macro for _ilg.Emit(Opcodes.Ldstr, str)</summary> protected void Ldstr(string str) => _ilg!.Emit(OpCodes.Ldstr, str); /// <summary>A macro for the various forms of Ldc.</summary> protected void Ldc(int i) => _ilg!.Emit(OpCodes.Ldc_I4, i); /// <summary>A macro for _ilg.Emit(OpCodes.Ldc_I8).</summary> protected void LdcI8(long i) => _ilg!.Emit(OpCodes.Ldc_I8, i); /// <summary>A macro for _ilg.Emit(OpCodes.Ret).</summary> protected void Ret() => _ilg!.Emit(OpCodes.Ret); /// <summary>A macro for _ilg.Emit(OpCodes.Dup).</summary> protected void Dup() => _ilg!.Emit(OpCodes.Dup); /// <summary>A macro for _ilg.Emit(OpCodes.Rem_Un).</summary> private void RemUn() => _ilg!.Emit(OpCodes.Rem_Un); /// <summary>A macro for _ilg.Emit(OpCodes.Ceq).</summary> private void Ceq() => _ilg!.Emit(OpCodes.Ceq); /// <summary>A macro for _ilg.Emit(OpCodes.Cgt_Un).</summary> private void CgtUn() => _ilg!.Emit(OpCodes.Cgt_Un); /// <summary>A macro for _ilg.Emit(OpCodes.Clt_Un).</summary> private void CltUn() => _ilg!.Emit(OpCodes.Clt_Un); /// <summary>A macro for _ilg.Emit(OpCodes.Pop).</summary> private void Pop() => _ilg!.Emit(OpCodes.Pop); /// <summary>A macro for _ilg.Emit(OpCodes.Add).</summary> private void Add() => _ilg!.Emit(OpCodes.Add); /// <summary>A macro for _ilg.Emit(OpCodes.Sub).</summary> private void Sub() => _ilg!.Emit(OpCodes.Sub); /// <summary>A macro for _ilg.Emit(OpCodes.Mul).</summary> private void Mul() => _ilg!.Emit(OpCodes.Mul); /// <summary>A macro for _ilg.Emit(OpCodes.And).</summary> private void And() => _ilg!.Emit(OpCodes.And); /// <summary>A macro for _ilg.Emit(OpCodes.Or).</summary> private void Or() => _ilg!.Emit(OpCodes.Or); /// <summary>A macro for _ilg.Emit(OpCodes.Shl).</summary> private void Shl() => _ilg!.Emit(OpCodes.Shl); /// <summary>A macro for _ilg.Emit(OpCodes.Shr).</summary> private void Shr() => _ilg!.Emit(OpCodes.Shr); /// <summary>A macro for _ilg.Emit(OpCodes.Ldloc).</summary> /// <remarks>ILGenerator will switch to the optimal form based on the local's index.</remarks> private void Ldloc(LocalBuilder lt) => _ilg!.Emit(OpCodes.Ldloc, lt); /// <summary>A macro for _ilg.Emit(OpCodes.Ldloca).</summary> /// <remarks>ILGenerator will switch to the optimal form based on the local's index.</remarks> private void Ldloca(LocalBuilder lt) => _ilg!.Emit(OpCodes.Ldloca, lt); /// <summary>A macro for _ilg.Emit(OpCodes.Ldind_U2).</summary> private void LdindU2() => _ilg!.Emit(OpCodes.Ldind_U2); /// <summary>A macro for _ilg.Emit(OpCodes.Ldind_I4).</summary> private void LdindI4() => _ilg!.Emit(OpCodes.Ldind_I4); /// <summary>A macro for _ilg.Emit(OpCodes.Ldind_I8).</summary> private void LdindI8() => _ilg!.Emit(OpCodes.Ldind_I8); /// <summary>A macro for _ilg.Emit(OpCodes.Unaligned).</summary> private void Unaligned(byte alignment) => _ilg!.Emit(OpCodes.Unaligned, alignment); /// <summary>A macro for _ilg.Emit(OpCodes.Stloc).</summary> /// <remarks>ILGenerator will switch to the optimal form based on the local's index.</remarks> private void Stloc(LocalBuilder lt) => _ilg!.Emit(OpCodes.Stloc, lt); /// <summary>A macro for _ilg.Emit(OpCodes.Ldarg_0).</summary> protected void Ldthis() => 
_ilg!.Emit(OpCodes.Ldarg_0); /// <summary>A macro for _ilgEmit(OpCodes.Ldarg_1) </summary> private void Ldarg_1() => _ilg!.Emit(OpCodes.Ldarg_1); /// <summary>A macro for Ldthis(); Ldfld();</summary> protected void Ldthisfld(FieldInfo ft) { Ldthis(); _ilg!.Emit(OpCodes.Ldfld, ft); } /// <summary>Fetches the address of argument in passed in <paramref name="position"/></summary> /// <param name="position">The position of the argument which address needs to be fetched.</param> private void Ldarga_s(int position) => _ilg!.Emit(OpCodes.Ldarga_S, position); /// <summary>A macro for Ldthis(); Ldfld(); Stloc();</summary> private void Mvfldloc(FieldInfo ft, LocalBuilder lt) { Ldthisfld(ft); Stloc(lt); } /// <summary>A macro for _ilg.Emit(OpCodes.Stfld).</summary> protected void Stfld(FieldInfo ft) => _ilg!.Emit(OpCodes.Stfld, ft); /// <summary>A macro for _ilg.Emit(OpCodes.Callvirt, mt).</summary> protected void Callvirt(MethodInfo mt) => _ilg!.Emit(OpCodes.Callvirt, mt); /// <summary>A macro for _ilg.Emit(OpCodes.Call, mt).</summary> protected void Call(MethodInfo mt) => _ilg!.Emit(OpCodes.Call, mt); /// <summary>A macro for _ilg.Emit(OpCodes.Brfalse) (long form).</summary> private void BrfalseFar(Label l) => _ilg!.Emit(OpCodes.Brfalse, l); /// <summary>A macro for _ilg.Emit(OpCodes.Brtrue) (long form).</summary> private void BrtrueFar(Label l) => _ilg!.Emit(OpCodes.Brtrue, l); /// <summary>A macro for _ilg.Emit(OpCodes.Br) (long form).</summary> private void BrFar(Label l) => _ilg!.Emit(OpCodes.Br, l); /// <summary>A macro for _ilg.Emit(OpCodes.Ble) (long form).</summary> private void BleFar(Label l) => _ilg!.Emit(OpCodes.Ble, l); /// <summary>A macro for _ilg.Emit(OpCodes.Blt) (long form).</summary> private void BltFar(Label l) => _ilg!.Emit(OpCodes.Blt, l); /// <summary>A macro for _ilg.Emit(OpCodes.Blt_Un) (long form).</summary> private void BltUnFar(Label l) => _ilg!.Emit(OpCodes.Blt_Un, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bge) (long form).</summary> private void BgeFar(Label l) => _ilg!.Emit(OpCodes.Bge, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bge_Un) (long form).</summary> private void BgeUnFar(Label l) => _ilg!.Emit(OpCodes.Bge_Un, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bne) (long form).</summary> private void BneFar(Label l) => _ilg!.Emit(OpCodes.Bne_Un, l); /// <summary>A macro for _ilg.Emit(OpCodes.Beq) (long form).</summary> private void BeqFar(Label l) => _ilg!.Emit(OpCodes.Beq, l); /// <summary>A macro for _ilg.Emit(OpCodes.Brtrue_S) (short jump).</summary> private void Brtrue(Label l) => _ilg!.Emit(OpCodes.Brtrue_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Br_S) (short jump).</summary> private void Br(Label l) => _ilg!.Emit(OpCodes.Br_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Ble_S) (short jump).</summary> private void Ble(Label l) => _ilg!.Emit(OpCodes.Ble_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Blt_S) (short jump).</summary> private void Blt(Label l) => _ilg!.Emit(OpCodes.Blt_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bge_S) (short jump).</summary> private void Bge(Label l) => _ilg!.Emit(OpCodes.Bge_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bge_Un_S) (short jump).</summary> private void BgeUn(Label l) => _ilg!.Emit(OpCodes.Bge_Un_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bgt_S) (short jump).</summary> private void Bgt(Label l) => _ilg!.Emit(OpCodes.Bgt_S, l); /// <summary>A macro for _ilg.Emit(OpCodes.Bne_S) (short jump).</summary> private void Bne(Label l) => _ilg!.Emit(OpCodes.Bne_Un_S, l); /// <summary>A 
macro for _ilg.Emit(OpCodes.Beq_S) (short jump).</summary> private void Beq(Label l) => _ilg!.Emit(OpCodes.Beq_S, l); /// <summary>A macro for the Ldlen instruction.</summary> private void Ldlen() => _ilg!.Emit(OpCodes.Ldlen); /// <summary>A macro for the Ldelem_I4 instruction.</summary> private void LdelemI4() => _ilg!.Emit(OpCodes.Ldelem_I4); /// <summary>A macro for the Stelem_I4 instruction.</summary> private void StelemI4() => _ilg!.Emit(OpCodes.Stelem_I4); private void Switch(Label[] table) => _ilg!.Emit(OpCodes.Switch, table); /// <summary>Declares a local bool.</summary> private LocalBuilder DeclareBool() => _ilg!.DeclareLocal(typeof(bool)); /// <summary>Declares a local int.</summary> private LocalBuilder DeclareInt32() => _ilg!.DeclareLocal(typeof(int)); /// <summary>Declares a local CultureInfo.</summary> private LocalBuilder? DeclareTextInfo() => _ilg!.DeclareLocal(typeof(TextInfo)); /// <summary>Declares a local string.</summary> private LocalBuilder DeclareString() => _ilg!.DeclareLocal(typeof(string)); private LocalBuilder DeclareReadOnlySpanChar() => _ilg!.DeclareLocal(typeof(ReadOnlySpan<char>)); /// <summary>Rents an Int32 local variable slot from the pool of locals.</summary> /// <remarks> /// Care must be taken to Dispose of the returned <see cref="RentedLocalBuilder"/> when it's no longer needed, /// and also not to jump into the middle of a block involving a rented local from outside of that block. /// </remarks> private RentedLocalBuilder RentInt32Local() => new RentedLocalBuilder( _int32LocalsPool ??= new Stack<LocalBuilder>(), _int32LocalsPool.TryPop(out LocalBuilder? iterationLocal) ? iterationLocal : DeclareInt32()); /// <summary>Rents a ReadOnlySpan(char) local variable slot from the pool of locals.</summary> /// <remarks> /// Care must be taken to Dispose of the returned <see cref="RentedLocalBuilder"/> when it's no longer needed, /// and also not to jump into the middle of a block involving a rented local from outside of that block. /// </remarks> private RentedLocalBuilder RentReadOnlySpanCharLocal() => new RentedLocalBuilder( _readOnlySpanCharLocalsPool ??= new Stack<LocalBuilder>(1), // capacity == 1 as we currently don't expect overlapping instances _readOnlySpanCharLocalsPool.TryPop(out LocalBuilder? iterationLocal) ? 
iterationLocal : DeclareReadOnlySpanChar()); /// <summary>Returned a rented local to the pool.</summary> private struct RentedLocalBuilder : IDisposable { private readonly Stack<LocalBuilder> _pool; private readonly LocalBuilder _local; internal RentedLocalBuilder(Stack<LocalBuilder> pool, LocalBuilder local) { _local = local; _pool = pool; } public static implicit operator LocalBuilder(RentedLocalBuilder local) => local._local; public void Dispose() { Debug.Assert(_pool != null); Debug.Assert(_local != null); Debug.Assert(!_pool.Contains(_local)); _pool.Push(_local); this = default; } } /// <summary>Sets the culture local to CultureInfo.CurrentCulture.</summary> private void InitLocalCultureInfo() { Debug.Assert(_textInfo != null); Call(s_cultureInfoGetCurrentCultureMethod); Callvirt(s_cultureInfoGetTextInfoMethod); Stloc(_textInfo); } /// <summary>Whether ToLower operations should be performed with the invariant culture as opposed to the one in <see cref="_textInfo"/>.</summary> private bool UseToLowerInvariant => _textInfo == null || (_options & RegexOptions.CultureInvariant) != 0; /// <summary>Invokes either char.ToLowerInvariant(c) or _textInfo.ToLower(c).</summary> private void CallToLower() { if (UseToLowerInvariant) { Call(s_charToLowerInvariantMethod); } else { using RentedLocalBuilder currentCharLocal = RentInt32Local(); Stloc(currentCharLocal); Ldloc(_textInfo!); Ldloc(currentCharLocal); Callvirt(s_textInfoToLowerMethod); } } /// <summary>Generates the implementation for TryFindNextPossibleStartingPosition.</summary> protected void EmitTryFindNextPossibleStartingPosition() { Debug.Assert(_regexTree != null); _int32LocalsPool?.Clear(); _readOnlySpanCharLocalsPool?.Clear(); LocalBuilder inputSpan = DeclareReadOnlySpanChar(); LocalBuilder pos = DeclareInt32(); _textInfo = null; if ((_options & RegexOptions.CultureInvariant) == 0) { bool needsCulture = _regexTree.FindOptimizations.FindMode switch { FindNextStartingPositionMode.FixedLiteral_LeftToRight_CaseInsensitive or FindNextStartingPositionMode.FixedSets_LeftToRight_CaseInsensitive or FindNextStartingPositionMode.LeadingSet_LeftToRight_CaseInsensitive => true, _ when _regexTree.FindOptimizations.FixedDistanceSets is List<(char[]? Chars, string Set, int Distance, bool CaseInsensitive)> sets => sets.Exists(set => set.CaseInsensitive), _ => false, }; if (needsCulture) { _textInfo = DeclareTextInfo(); InitLocalCultureInfo(); } } // Load necessary locals // int pos = base.runtextpos; // ReadOnlySpan<char> inputSpan = dynamicMethodArg; // TODO: We can reference the arg directly rather than using another local. Mvfldloc(s_runtextposField, pos); Ldarg_1(); Stloc(inputSpan); // Generate length check. If the input isn't long enough to possibly match, fail quickly. // It's rare for min required length to be 0, so we don't bother special-casing the check, // especially since we want the "return false" code regardless. 
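// (The branch emitted below is 'pos <= inputSpan.Length - minRequiredLength'; when it fails we know
// fewer than minRequiredLength characters remain starting at pos, so no match is possible and
// runtextpos is advanced to the end of the input before returning false.)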
int minRequiredLength = _regexTree.FindOptimizations.MinRequiredLength; Debug.Assert(minRequiredLength >= 0); Label returnFalse = DefineLabel(); Label finishedLengthCheck = DefineLabel(); // if (pos > inputSpan.Length - _code.Tree.MinRequiredLength) // { // base.runtextpos = inputSpan.Length; // return false; // } Ldloc(pos); Ldloca(inputSpan); Call(s_spanGetLengthMethod); if (minRequiredLength > 0) { Ldc(minRequiredLength); Sub(); } Ble(finishedLengthCheck); MarkLabel(returnFalse); Ldthis(); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Stfld(s_runtextposField); Ldc(0); Ret(); MarkLabel(finishedLengthCheck); // Emit any anchors. if (GenerateAnchors()) { return; } // Either anchors weren't specified, or they don't completely root all matches to a specific location. switch (_regexTree.FindOptimizations.FindMode) { case FindNextStartingPositionMode.LeadingPrefix_LeftToRight_CaseSensitive: Debug.Assert(!string.IsNullOrEmpty(_regexTree.FindOptimizations.LeadingCaseSensitivePrefix)); EmitIndexOf_LeftToRight(_regexTree.FindOptimizations.LeadingCaseSensitivePrefix); break; case FindNextStartingPositionMode.LeadingSet_LeftToRight_CaseSensitive: case FindNextStartingPositionMode.LeadingSet_LeftToRight_CaseInsensitive: case FindNextStartingPositionMode.FixedSets_LeftToRight_CaseSensitive: case FindNextStartingPositionMode.FixedSets_LeftToRight_CaseInsensitive: Debug.Assert(_regexTree.FindOptimizations.FixedDistanceSets is { Count: > 0 }); EmitFixedSet_LeftToRight(); break; case FindNextStartingPositionMode.LiteralAfterLoop_LeftToRight_CaseSensitive: Debug.Assert(_regexTree.FindOptimizations.LiteralAfterLoop is not null); EmitLiteralAfterAtomicLoop(); break; default: Debug.Fail($"Unexpected mode: {_regexTree.FindOptimizations.FindMode}"); goto case FindNextStartingPositionMode.NoSearch; case FindNextStartingPositionMode.NoSearch: // return true; Ldc(1); Ret(); break; } // Emits any anchors. Returns true if the anchor roots any match to a specific location and thus no further // searching is required; otherwise, false. bool GenerateAnchors() { Label label; // Anchors that fully implement TryFindNextPossibleStartingPosition, with a check that leads to immediate success or failure determination. switch (_regexTree.FindOptimizations.FindMode) { case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_Beginning: label = DefineLabel(); Ldloc(pos); Ldc(0); Ble(label); Br(returnFalse); MarkLabel(label); Ldc(1); Ret(); return true; case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_Start: label = DefineLabel(); Ldloc(pos); Ldthisfld(s_runtextstartField); Ble(label); Br(returnFalse); MarkLabel(label); Ldc(1); Ret(); return true; case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_EndZ: label = DefineLabel(); Ldloc(pos); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldc(1); Sub(); Bge(label); Ldthis(); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldc(1); Sub(); Stfld(s_runtextposField); MarkLabel(label); Ldc(1); Ret(); return true; case FindNextStartingPositionMode.LeadingAnchor_LeftToRight_End: label = DefineLabel(); Ldloc(pos); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Bge(label); Ldthis(); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Stfld(s_runtextposField); MarkLabel(label); Ldc(1); Ret(); return true; case FindNextStartingPositionMode.TrailingAnchor_FixedLength_LeftToRight_End: case FindNextStartingPositionMode.TrailingAnchor_FixedLength_LeftToRight_EndZ: // Jump to the end, minus the min required length, which in this case is actually the fixed length. 
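// (For example, a pattern that is entirely fixed-length and anchored at the end, such as @"abc\z",
// can only begin matching at inputSpan.Length - 3, so the position is bumped straight there; the
// extraNewlineBump below adds one extra character of slack for \Z / $, which tolerate a final '\n'.)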
{ int extraNewlineBump = _regexTree.FindOptimizations.FindMode == FindNextStartingPositionMode.TrailingAnchor_FixedLength_LeftToRight_EndZ ? 1 : 0; label = DefineLabel(); Ldloc(pos); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldc(_regexTree.FindOptimizations.MinRequiredLength + extraNewlineBump); Sub(); Bge(label); Ldthis(); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldc(_regexTree.FindOptimizations.MinRequiredLength + extraNewlineBump); Sub(); Stfld(s_runtextposField); MarkLabel(label); Ldc(1); Ret(); return true; } } // Now handle anchors that boost the position but don't determine immediate success or failure. switch (_regexTree.FindOptimizations.LeadingAnchor) { case RegexNodeKind.Bol: { // Optimize the handling of a Beginning-Of-Line (BOL) anchor. BOL is special, in that unlike // other anchors like Beginning, there are potentially multiple places a BOL can match. So unlike // the other anchors, which all skip all subsequent processing if found, with BOL we just use it // to boost our position to the next line, and then continue normally with any prefix or char class searches. label = DefineLabel(); // if (pos > 0... Ldloc(pos!); Ldc(0); Ble(label); // ... && inputSpan[pos - 1] != '\n') { ... } Ldloca(inputSpan); Ldloc(pos); Ldc(1); Sub(); Call(s_spanGetItemMethod); LdindU2(); Ldc('\n'); Beq(label); // int tmp = inputSpan.Slice(pos).IndexOf('\n'); Ldloca(inputSpan); Ldloc(pos); Call(s_spanSliceIntMethod); Ldc('\n'); Call(s_spanIndexOfChar); using (RentedLocalBuilder newlinePos = RentInt32Local()) { Stloc(newlinePos); // if (newlinePos < 0 || newlinePos + pos + 1 > inputSpan.Length) // { // base.runtextpos = inputSpan.Length; // return false; // } Ldloc(newlinePos); Ldc(0); Blt(returnFalse); Ldloc(newlinePos); Ldloc(pos); Add(); Ldc(1); Add(); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Bgt(returnFalse); // pos += newlinePos + 1; Ldloc(pos); Ldloc(newlinePos); Add(); Ldc(1); Add(); Stloc(pos); } MarkLabel(label); } break; } switch (_regexTree.FindOptimizations.TrailingAnchor) { case RegexNodeKind.End or RegexNodeKind.EndZ when _regexTree.FindOptimizations.MaxPossibleLength is int maxLength: // Jump to the end, minus the max allowed length. { int extraNewlineBump = _regexTree.FindOptimizations.FindMode == FindNextStartingPositionMode.TrailingAnchor_FixedLength_LeftToRight_EndZ ? 1 : 0; label = DefineLabel(); Ldloc(pos); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldc(maxLength + extraNewlineBump); Sub(); Bge(label); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldc(maxLength + extraNewlineBump); Sub(); Stloc(pos); MarkLabel(label); break; } } return false; } void EmitIndexOf_LeftToRight(string prefix) { using RentedLocalBuilder i = RentInt32Local(); // int i = inputSpan.Slice(pos).IndexOf(prefix); Ldloca(inputSpan); Ldloc(pos); Call(s_spanSliceIntMethod); Ldstr(prefix); Call(s_stringAsSpanMethod); Call(s_spanIndexOfSpan); Stloc(i); // if (i < 0) goto ReturnFalse; Ldloc(i); Ldc(0); BltFar(returnFalse); // base.runtextpos = pos + i; // return true; Ldthis(); Ldloc(pos); Ldloc(i); Add(); Stfld(s_runtextposField); Ldc(1); Ret(); } void EmitFixedSet_LeftToRight() { List<(char[]? Chars, string Set, int Distance, bool CaseInsensitive)>? sets = _regexTree.FindOptimizations.FixedDistanceSets; (char[]? 
Chars, string Set, int Distance, bool CaseInsensitive) primarySet = sets![0]; const int MaxSets = 4; int setsToUse = Math.Min(sets.Count, MaxSets); using RentedLocalBuilder iLocal = RentInt32Local(); using RentedLocalBuilder textSpanLocal = RentReadOnlySpanCharLocal(); // ReadOnlySpan<char> span = inputSpan.Slice(pos); Ldloca(inputSpan); Ldloc(pos); Call(s_spanSliceIntMethod); Stloc(textSpanLocal); // If we can use IndexOf{Any}, try to accelerate the skip loop via vectorization to match the first prefix. // We can use it if this is a case-sensitive class with a small number of characters in the class. int setIndex = 0; bool canUseIndexOf = !primarySet.CaseInsensitive && primarySet.Chars is not null; bool needLoop = !canUseIndexOf || setsToUse > 1; Label checkSpanLengthLabel = default; Label charNotInClassLabel = default; Label loopBody = default; if (needLoop) { checkSpanLengthLabel = DefineLabel(); charNotInClassLabel = DefineLabel(); loopBody = DefineLabel(); // for (int i = 0; Ldc(0); Stloc(iLocal); BrFar(checkSpanLengthLabel); MarkLabel(loopBody); } if (canUseIndexOf) { setIndex = 1; if (needLoop) { // slice.Slice(iLocal + primarySet.Distance); Ldloca(textSpanLocal); Ldloc(iLocal); if (primarySet.Distance != 0) { Ldc(primarySet.Distance); Add(); } Call(s_spanSliceIntMethod); } else if (primarySet.Distance != 0) { // slice.Slice(primarySet.Distance) Ldloca(textSpanLocal); Ldc(primarySet.Distance); Call(s_spanSliceIntMethod); } else { // slice Ldloc(textSpanLocal); } switch (primarySet.Chars!.Length) { case 1: // tmp = ...IndexOf(setChars[0]); Ldc(primarySet.Chars[0]); Call(s_spanIndexOfChar); break; case 2: // tmp = ...IndexOfAny(setChars[0], setChars[1]); Ldc(primarySet.Chars[0]); Ldc(primarySet.Chars[1]); Call(s_spanIndexOfAnyCharChar); break; case 3: // tmp = ...IndexOfAny(setChars[0], setChars[1], setChars[2]}); Ldc(primarySet.Chars[0]); Ldc(primarySet.Chars[1]); Ldc(primarySet.Chars[2]); Call(s_spanIndexOfAnyCharCharChar); break; default: Ldstr(new string(primarySet.Chars)); Call(s_stringAsSpanMethod); Call(s_spanIndexOfAnySpan); break; } if (needLoop) { // i += tmp; // if (tmp < 0) goto returnFalse; using (RentedLocalBuilder tmp = RentInt32Local()) { Stloc(tmp); Ldloc(iLocal); Ldloc(tmp); Add(); Stloc(iLocal); Ldloc(tmp); Ldc(0); BltFar(returnFalse); } } else { // i = tmp; // if (i < 0) goto returnFalse; Stloc(iLocal); Ldloc(iLocal); Ldc(0); BltFar(returnFalse); } // if (i >= slice.Length - (minRequiredLength - 1)) goto returnFalse; if (sets.Count > 1) { Debug.Assert(needLoop); Ldloca(textSpanLocal); Call(s_spanGetLengthMethod); Ldc(minRequiredLength - 1); Sub(); Ldloc(iLocal); BleFar(returnFalse); } } // if (!CharInClass(slice[i], prefix[0], "...")) continue; // if (!CharInClass(slice[i + 1], prefix[1], "...")) continue; // if (!CharInClass(slice[i + 2], prefix[2], "...")) continue; // ... 
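// Putting the pieces together, the emitted search has roughly this shape (illustration only, for a
// primary set usable with IndexOfAny at distance 0 plus one additional set at distance 1;
// 'primaryChars' and 'secondSet' are stand-ins for the analyzed set data):
//
//     ReadOnlySpan<char> span = inputSpan.Slice(pos);
//     for (int i = 0; i < span.Length - (minRequiredLength - 1); i++)
//     {
//         int skip = span.Slice(i).IndexOfAny(primaryChars); // vectorized skip over impossible starts
//         if (skip < 0) goto ReturnFalse;
//         i += skip;
//         if (i >= span.Length - (minRequiredLength - 1)) goto ReturnFalse;
//         if (!RegexRunner.CharInClass(span[i + 1], secondSet)) continue; // secondary set at distance 1
//         base.runtextpos = pos + i;
//         return true;
//     }
//     // ReturnFalse: base.runtextpos = inputSpan.Length; return false;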
Debug.Assert(setIndex is 0 or 1); for ( ; setIndex < sets.Count; setIndex++) { Debug.Assert(needLoop); Ldloca(textSpanLocal); Ldloc(iLocal); if (sets[setIndex].Distance != 0) { Ldc(sets[setIndex].Distance); Add(); } Call(s_spanGetItemMethod); LdindU2(); EmitMatchCharacterClass(sets[setIndex].Set, sets[setIndex].CaseInsensitive); BrfalseFar(charNotInClassLabel); } // base.runtextpos = pos + i; // return true; Ldthis(); Ldloc(pos); Ldloc(iLocal); Add(); Stfld(s_runtextposField); Ldc(1); Ret(); if (needLoop) { MarkLabel(charNotInClassLabel); // for (...; ...; i++) Ldloc(iLocal); Ldc(1); Add(); Stloc(iLocal); // for (...; i < span.Length - (minRequiredLength - 1); ...); MarkLabel(checkSpanLengthLabel); Ldloc(iLocal); Ldloca(textSpanLocal); Call(s_spanGetLengthMethod); if (setsToUse > 1 || primarySet.Distance != 0) { Ldc(minRequiredLength - 1); Sub(); } BltFar(loopBody); // base.runtextpos = inputSpan.Length; // return false; BrFar(returnFalse); } } // Emits a search for a literal following a leading atomic single-character loop. void EmitLiteralAfterAtomicLoop() { Debug.Assert(_regexTree.FindOptimizations.LiteralAfterLoop is not null); (RegexNode LoopNode, (char Char, string? String, char[]? Chars) Literal) target = _regexTree.FindOptimizations.LiteralAfterLoop.Value; Debug.Assert(target.LoopNode.Kind is RegexNodeKind.Setloop or RegexNodeKind.Setlazy or RegexNodeKind.Setloopatomic); Debug.Assert(target.LoopNode.N == int.MaxValue); // while (true) Label loopBody = DefineLabel(); Label loopEnd = DefineLabel(); MarkLabel(loopBody); // ReadOnlySpan<char> slice = inputSpan.Slice(pos); using RentedLocalBuilder slice = RentReadOnlySpanCharLocal(); Ldloca(inputSpan); Ldloc(pos); Call(s_spanSliceIntMethod); Stloc(slice); // Find the literal. If we can't find it, we're done searching. // int i = slice.IndexOf(literal); // if (i < 0) break; using RentedLocalBuilder i = RentInt32Local(); Ldloc(slice); if (target.Literal.String is string literalString) { Ldstr(literalString); Call(s_stringAsSpanMethod); Call(s_spanIndexOfSpan); } else if (target.Literal.Chars is not char[] literalChars) { Ldc(target.Literal.Char); Call(s_spanIndexOfChar); } else { switch (literalChars.Length) { case 2: Ldc(literalChars[0]); Ldc(literalChars[1]); Call(s_spanIndexOfAnyCharChar); break; case 3: Ldc(literalChars[0]); Ldc(literalChars[1]); Ldc(literalChars[2]); Call(s_spanIndexOfAnyCharCharChar); break; default: Ldstr(new string(literalChars)); Call(s_stringAsSpanMethod); Call(s_spanIndexOfAnySpan); break; } } Stloc(i); Ldloc(i); Ldc(0); BltFar(loopEnd); // We found the literal. Walk backwards from it finding as many matches as we can against the loop. // int prev = i; using RentedLocalBuilder prev = RentInt32Local(); Ldloc(i); Stloc(prev); // while ((uint)--prev < (uint)slice.Length) && MatchCharClass(slice[prev])); Label innerLoopBody = DefineLabel(); Label innerLoopEnd = DefineLabel(); MarkLabel(innerLoopBody); Ldloc(prev); Ldc(1); Sub(); Stloc(prev); Ldloc(prev); Ldloca(slice); Call(s_spanGetLengthMethod); BgeUn(innerLoopEnd); Ldloca(slice); Ldloc(prev); Call(s_spanGetItemMethod); LdindU2(); EmitMatchCharacterClass(target.LoopNode.Str!, caseInsensitive: false); BrtrueFar(innerLoopBody); MarkLabel(innerLoopEnd); if (target.LoopNode.M > 0) { // If we found fewer than needed, loop around to try again. The loop doesn't overlap with the literal, // so we can start from after the last place the literal matched. 
// if ((i - prev - 1) < target.LoopNode.M) // { // pos += i + 1; // continue; // } Label metMinimum = DefineLabel(); Ldloc(i); Ldloc(prev); Sub(); Ldc(1); Sub(); Ldc(target.LoopNode.M); Bge(metMinimum); Ldloc(pos); Ldloc(i); Add(); Ldc(1); Add(); Stloc(pos); BrFar(loopBody); MarkLabel(metMinimum); } // We have a winner. The starting position is just after the last position that failed to match the loop. // TODO: It'd be nice to be able to communicate i as a place the matching engine can start matching // after the loop, so that it doesn't need to re-match the loop. // base.runtextpos = pos + prev + 1; // return true; Ldthis(); Ldloc(pos); Ldloc(prev); Add(); Ldc(1); Add(); Stfld(s_runtextposField); Ldc(1); Ret(); // } MarkLabel(loopEnd); // base.runtextpos = inputSpan.Length; // return false; BrFar(returnFalse); } } /// <summary>Generates the implementation for TryMatchAtCurrentPosition.</summary> protected void EmitTryMatchAtCurrentPosition() { // In .NET Framework and up through .NET Core 3.1, the code generated for RegexOptions.Compiled was effectively an unrolled // version of what RegexInterpreter would process. The RegexNode tree would be turned into a series of opcodes via // RegexWriter; the interpreter would then sit in a loop processing those opcodes, and the RegexCompiler iterated through the // opcodes generating code for each equivalent to what the interpreter would do albeit with some decisions made at compile-time // rather than at run-time. This approach, however, lead to complicated code that wasn't pay-for-play (e.g. a big backtracking // jump table that all compilations went through even if there was no backtracking), that didn't factor in the shape of the // tree (e.g. it's difficult to add optimizations based on interactions between nodes in the graph), and that didn't read well // when decompiled from IL to C# or when directly emitted as C# as part of a source generator. // // This implementation is instead based on directly walking the RegexNode tree and outputting code for each node in the graph. // A dedicated for each kind of RegexNode emits the code necessary to handle that node's processing, including recursively // calling the relevant function for any of its children nodes. Backtracking is handled not via a giant jump table, but instead // by emitting direct jumps to each backtracking construct. This is achieved by having all match failures jump to a "done" // label that can be changed by a previous emitter, e.g. before EmitLoop returns, it ensures that "doneLabel" is set to the // label that code should jump back to when backtracking. That way, a subsequent EmitXx function doesn't need to know exactly // where to jump: it simply always jumps to "doneLabel" on match failure, and "doneLabel" is always configured to point to // the right location. In an expression without backtracking, or before any backtracking constructs have been encountered, // "doneLabel" is simply the final return location from the TryMatchAtCurrentPosition method that will undo any captures and exit, signaling to // the calling scan loop that nothing was matched. Debug.Assert(_regexTree != null); _int32LocalsPool?.Clear(); _readOnlySpanCharLocalsPool?.Clear(); // Get the root Capture node of the tree. RegexNode node = _regexTree.Root; Debug.Assert(node.Kind == RegexNodeKind.Capture, "Every generated tree should begin with a capture node"); Debug.Assert(node.ChildCount() == 1, "Capture nodes should have one child"); // Skip the Capture node. We handle the implicit root capture specially. 
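// (The root capture is group 0 and always wraps the whole pattern; rather than emitting a general
// Capture node for it, the method's epilogue calls base.Capture(0, originalpos, pos) directly once
// the child has matched, which is why the node is unwrapped here.)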
node = node.Child(0); // In some limited cases, TryFindNextPossibleStartingPosition will only return true if it successfully matched the whole expression. // We can special case these to do essentially nothing in TryMatchAtCurrentPosition other than emit the capture. switch (node.Kind) { case RegexNodeKind.Multi or RegexNodeKind.Notone or RegexNodeKind.One or RegexNodeKind.Set when !IsCaseInsensitive(node): // This is the case for single and multiple characters, though the whole thing is only guaranteed // to have been validated in TryFindNextPossibleStartingPosition when doing case-sensitive comparison. // base.Capture(0, base.runtextpos, base.runtextpos + node.Str.Length); // base.runtextpos = base.runtextpos + node.Str.Length; // return true; Ldthis(); Dup(); Ldc(0); Ldthisfld(s_runtextposField); Dup(); Ldc(node.Kind == RegexNodeKind.Multi ? node.Str!.Length : 1); Add(); Call(s_captureMethod); Ldthisfld(s_runtextposField); Ldc(node.Kind == RegexNodeKind.Multi ? node.Str!.Length : 1); Add(); Stfld(s_runtextposField); Ldc(1); Ret(); return; // The source generator special-cases RegexNode.Empty, for purposes of code learning rather than // performance. Since that's not applicable to RegexCompiler, that code isn't mirrored here. } AnalysisResults analysis = RegexTreeAnalyzer.Analyze(_regexTree); // Initialize the main locals used throughout the implementation. LocalBuilder inputSpan = DeclareReadOnlySpanChar(); LocalBuilder originalPos = DeclareInt32(); LocalBuilder pos = DeclareInt32(); LocalBuilder slice = DeclareReadOnlySpanChar(); Label doneLabel = DefineLabel(); Label originalDoneLabel = doneLabel; if (_hasTimeout) { _loopTimeoutCounter = DeclareInt32(); } // CultureInfo culture = CultureInfo.CurrentCulture; // only if the whole expression or any subportion is ignoring case, and we're not using invariant InitializeCultureForTryMatchAtCurrentPositionIfNecessary(analysis); // ReadOnlySpan<char> inputSpan = input; Ldarg_1(); Stloc(inputSpan); // int pos = base.runtextpos; // int originalpos = pos; Ldthisfld(s_runtextposField); Stloc(pos); Ldloc(pos); Stloc(originalPos); // int stackpos = 0; LocalBuilder stackpos = DeclareInt32(); Ldc(0); Stloc(stackpos); // The implementation tries to use const indexes into the span wherever possible, which we can do // for all fixed-length constructs. In such cases (e.g. single chars, repeaters, strings, etc.) // we know at any point in the regex exactly how far into it we are, and we can use that to index // into the span created at the beginning of the routine to begin at exactly where we're starting // in the input. When we encounter a variable-length construct, we transfer the static value to // pos, slicing the inputSpan appropriately, and then zero out the static position. int sliceStaticPos = 0; SliceInputSpan(); // Check whether there are captures anywhere in the expression. If there isn't, we can skip all // the boilerplate logic around uncapturing, as there won't be anything to uncapture. bool expressionHasCaptures = analysis.MayContainCapture(node); // Emit the code for all nodes in the tree. EmitNode(node); // pos += sliceStaticPos; // base.runtextpos = pos; // Capture(0, originalpos, pos); // return true; Ldthis(); Ldloc(pos); if (sliceStaticPos > 0) { Ldc(sliceStaticPos); Add(); Stloc(pos); Ldloc(pos); } Stfld(s_runtextposField); Ldthis(); Ldc(0); Ldloc(originalPos); Ldloc(pos); Call(s_captureMethod); Ldc(1); Ret(); // NOTE: The following is a difference from the source generator. 
The source generator emits: // UncaptureUntil(0); // return false; // at every location where the all-up match is known to fail. In contrast, the compiler currently // emits this uncapture/return code in one place and jumps to it upon match failure. The difference // stems primarily from the return-at-each-location pattern resulting in cleaner / easier to read // source code, which is not an issue for RegexCompiler emitting IL instead of C#. // If the graph contained captures, undo any remaining to handle failed matches. if (expressionHasCaptures) { // while (base.Crawlpos() != 0) base.Uncapture(); Label finalReturnLabel = DefineLabel(); Br(finalReturnLabel); MarkLabel(originalDoneLabel); Label condition = DefineLabel(); Label body = DefineLabel(); Br(condition); MarkLabel(body); Ldthis(); Call(s_uncaptureMethod); MarkLabel(condition); Ldthis(); Call(s_crawlposMethod); Brtrue(body); // Done: MarkLabel(finalReturnLabel); } else { // Done: MarkLabel(originalDoneLabel); } // return false; Ldc(0); Ret(); // Generated code successfully. return; static bool IsCaseInsensitive(RegexNode node) => (node.Options & RegexOptions.IgnoreCase) != 0; // Slices the inputSpan starting at pos until end and stores it into slice. void SliceInputSpan() { // slice = inputSpan.Slice(pos); Ldloca(inputSpan); Ldloc(pos); Call(s_spanSliceIntMethod); Stloc(slice); } // Emits the sum of a constant and a value from a local. void EmitSum(int constant, LocalBuilder? local) { if (local == null) { Ldc(constant); } else if (constant == 0) { Ldloc(local); } else { Ldloc(local); Ldc(constant); Add(); } } // Emits a check that the span is large enough at the currently known static position to handle the required additional length. void EmitSpanLengthCheck(int requiredLength, LocalBuilder? dynamicRequiredLength = null) { // if ((uint)(sliceStaticPos + requiredLength + dynamicRequiredLength - 1) >= (uint)slice.Length) goto Done; Debug.Assert(requiredLength > 0); EmitSum(sliceStaticPos + requiredLength - 1, dynamicRequiredLength); Ldloca(slice); Call(s_spanGetLengthMethod); BgeUnFar(doneLabel); } // Emits code to get ref slice[sliceStaticPos] void EmitTextSpanOffset() { Ldloc(slice); Call(s_memoryMarshalGetReference); if (sliceStaticPos > 0) { Ldc(sliceStaticPos * sizeof(char)); Add(); } } // Adds the value of sliceStaticPos into the pos local, slices textspan by the corresponding amount, // and zeros out sliceStaticPos. void TransferSliceStaticPosToPos() { if (sliceStaticPos > 0) { // pos += sliceStaticPos; Ldloc(pos); Ldc(sliceStaticPos); Add(); Stloc(pos); // slice = slice.Slice(sliceStaticPos); Ldloca(slice); Ldc(sliceStaticPos); Call(s_spanSliceIntMethod); Stloc(slice); // sliceStaticPos = 0; sliceStaticPos = 0; } } // Emits the code for an alternation. void EmitAlternation(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Alternate, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() >= 2, $"Expected at least 2 children, found {node.ChildCount()}"); int childCount = node.ChildCount(); Debug.Assert(childCount >= 2); Label originalDoneLabel = doneLabel; // Both atomic and non-atomic are supported. While a parent RegexNode.Atomic node will itself // successfully prevent backtracking into this child node, we can emit better / cheaper code // for an Alternate when it is atomic, so we still take it into account here. Debug.Assert(node.Parent is not null); bool isAtomic = analysis.IsAtomicByAncestor(node); // Label to jump to when any branch completes successfully. 
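// In rough outline, a non-atomic alternation (a|b|c) is emitted as follows (illustration only):
//
//     startingPos = pos;
//     // branch a: on failure goto NextBranch1; on success push {branch index, startingCapturePos,
//     //           startingPos} onto base.runstack and goto matchLabel;
//     // NextBranch1: pos = startingPos; uncapture back to startingCapturePos; branch b ... and so on;
//     //           failures in the last branch go to the enclosing doneLabel.
//     // backtrackLabel: pop the saved state and switch() on the branch index to resume backtracking
//     //           within whichever branch had matched.
//     // matchLabel: fall through to whatever follows the alternation.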
Label matchLabel = DefineLabel(); // Save off pos. We'll need to reset this each time a branch fails. // startingPos = pos; LocalBuilder startingPos = DeclareInt32(); Ldloc(pos); Stloc(startingPos); int startingTextSpanPos = sliceStaticPos; // We need to be able to undo captures in two situations: // - If a branch of the alternation itself contains captures, then if that branch // fails to match, any captures from that branch until that failure point need to // be uncaptured prior to jumping to the next branch. // - If the expression after the alternation contains captures, then failures // to match in those expressions could trigger backtracking back into the // alternation, and thus we need uncapture any of them. // As such, if the alternation contains captures or if it's not atomic, we need // to grab the current crawl position so we can unwind back to it when necessary. // We can do all of the uncapturing as part of falling through to the next branch. // If we fail in a branch, then such uncapturing will unwind back to the position // at the start of the alternation. If we fail after the alternation, and the // matched branch didn't contain any backtracking, then the failure will end up // jumping to the next branch, which will unwind the captures. And if we fail after // the alternation and the matched branch did contain backtracking, that backtracking // construct is responsible for unwinding back to its starting crawl position. If // it eventually ends up failing, that failure will result in jumping to the next branch // of the alternation, which will again dutifully unwind the remaining captures until // what they were at the start of the alternation. Of course, if there are no captures // anywhere in the regex, we don't have to do any of that. LocalBuilder? startingCapturePos = null; if (expressionHasCaptures && (analysis.MayContainCapture(node) || !isAtomic)) { // startingCapturePos = base.Crawlpos(); startingCapturePos = DeclareInt32(); Ldthis(); Call(s_crawlposMethod); Stloc(startingCapturePos); } // After executing the alternation, subsequent matching may fail, at which point execution // will need to backtrack to the alternation. We emit a branching table at the end of the // alternation, with a label that will be left as the "doneLabel" upon exiting emitting the // alternation. The branch table is populated with an entry for each branch of the alternation, // containing either the label for the last backtracking construct in the branch if such a construct // existed (in which case the doneLabel upon emitting that node will be different from before it) // or the label for the next branch. var labelMap = new Label[childCount]; Label backtrackLabel = DefineLabel(); for (int i = 0; i < childCount; i++) { bool isLastBranch = i == childCount - 1; Label nextBranch = default; if (!isLastBranch) { // Failure to match any branch other than the last one should result // in jumping to process the next branch. nextBranch = DefineLabel(); doneLabel = nextBranch; } else { // Failure to match the last branch is equivalent to failing to match // the whole alternation, which means those failures should jump to // what "doneLabel" was defined as when starting the alternation. doneLabel = originalDoneLabel; } // Emit the code for each branch. EmitNode(node.Child(i)); // Add this branch to the backtracking table. 
At this point, either the child // had backtracking constructs, in which case doneLabel points to the last one // and that's where we'll want to jump to, or it doesn't, in which case doneLabel // still points to the nextBranch, which similarly is where we'll want to jump to. if (!isAtomic) { // if (stackpos + 3 >= base.runstack.Length) Array.Resize(ref base.runstack, base.runstack.Length * 2); // base.runstack[stackpos++] = i; // base.runstack[stackpos++] = startingCapturePos; // base.runstack[stackpos++] = startingPos; EmitStackResizeIfNeeded(3); EmitStackPush(() => Ldc(i)); if (startingCapturePos is not null) { EmitStackPush(() => Ldloc(startingCapturePos)); } EmitStackPush(() => Ldloc(startingPos)); } labelMap[i] = doneLabel; // If we get here in the generated code, the branch completed successfully. // Before jumping to the end, we need to zero out sliceStaticPos, so that no // matter what the value is after the branch, whatever follows the alternate // will see the same sliceStaticPos. // pos += sliceStaticPos; // sliceStaticPos = 0; // goto matchLabel; TransferSliceStaticPosToPos(); BrFar(matchLabel); // Reset state for next branch and loop around to generate it. This includes // setting pos back to what it was at the beginning of the alternation, // updating slice to be the full length it was, and if there's a capture that // needs to be reset, uncapturing it. if (!isLastBranch) { // NextBranch: // pos = startingPos; // slice = inputSpan.Slice(pos); // while (base.Crawlpos() > startingCapturePos) base.Uncapture(); MarkLabel(nextBranch); Ldloc(startingPos); Stloc(pos); SliceInputSpan(); sliceStaticPos = startingTextSpanPos; if (startingCapturePos is not null) { EmitUncaptureUntil(startingCapturePos); } } } // We should never fall through to this location in the generated code. Either // a branch succeeded in matching and jumped to the end, or a branch failed in // matching and jumped to the next branch location. We only get to this code // if backtracking occurs and the code explicitly jumps here based on our setting // "doneLabel" to the label for this section. Thus, we only need to emit it if // something can backtrack to us, which can't happen if we're inside of an atomic // node. Thus, emit the backtracking section only if we're non-atomic. if (isAtomic) { doneLabel = originalDoneLabel; } else { doneLabel = backtrackLabel; MarkLabel(backtrackLabel); // startingPos = base.runstack[--stackpos]; // startingCapturePos = base.runstack[--stackpos]; // switch (base.runstack[--stackpos]) { ... } // branch number EmitStackPop(); Stloc(startingPos); if (startingCapturePos is not null) { EmitStackPop(); Stloc(startingCapturePos); } EmitStackPop(); Switch(labelMap); } // Successfully completed the alternate. MarkLabel(matchLabel); Debug.Assert(sliceStaticPos == 0); } // Emits the code to handle a backreference. void EmitBackreference(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Backreference, $"Unexpected type: {node.Kind}"); int capnum = RegexParser.MapCaptureNumber(node.M, _regexTree!.CaptureNumberSparseMapping); TransferSliceStaticPosToPos(); Label backreferenceEnd = DefineLabel(); // if (!base.IsMatched(capnum)) goto (ecmascript ? end : doneLabel); Ldthis(); Ldc(capnum); Call(s_isMatchedMethod); BrfalseFar((node.Options & RegexOptions.ECMAScript) == 0 ? 
doneLabel : backreferenceEnd); using RentedLocalBuilder matchLength = RentInt32Local(); using RentedLocalBuilder matchIndex = RentInt32Local(); using RentedLocalBuilder i = RentInt32Local(); // int matchLength = base.MatchLength(capnum); Ldthis(); Ldc(capnum); Call(s_matchLengthMethod); Stloc(matchLength); // if (slice.Length < matchLength) goto doneLabel; Ldloca(slice); Call(s_spanGetLengthMethod); Ldloc(matchLength); BltFar(doneLabel); // int matchIndex = base.MatchIndex(capnum); Ldthis(); Ldc(capnum); Call(s_matchIndexMethod); Stloc(matchIndex); Label condition = DefineLabel(); Label body = DefineLabel(); // for (int i = 0; ...) Ldc(0); Stloc(i); Br(condition); MarkLabel(body); // if (inputSpan[matchIndex + i] != slice[i]) goto doneLabel; Ldloca(inputSpan); Ldloc(matchIndex); Ldloc(i); Add(); Call(s_spanGetItemMethod); LdindU2(); if (IsCaseInsensitive(node)) { CallToLower(); } Ldloca(slice); Ldloc(i); Call(s_spanGetItemMethod); LdindU2(); if (IsCaseInsensitive(node)) { CallToLower(); } BneFar(doneLabel); // for (...; ...; i++) Ldloc(i); Ldc(1); Add(); Stloc(i); // for (...; i < matchLength; ...) MarkLabel(condition); Ldloc(i); Ldloc(matchLength); Blt(body); // pos += matchLength; Ldloc(pos); Ldloc(matchLength); Add(); Stloc(pos); SliceInputSpan(); MarkLabel(backreferenceEnd); } // Emits the code for an if(backreference)-then-else conditional. void EmitBackreferenceConditional(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.BackreferenceConditional, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 2, $"Expected 2 children, found {node.ChildCount()}"); bool isAtomic = analysis.IsAtomicByAncestor(node); // We're branching in a complicated fashion. Make sure sliceStaticPos is 0. TransferSliceStaticPosToPos(); // Get the capture number to test. int capnum = RegexParser.MapCaptureNumber(node.M, _regexTree!.CaptureNumberSparseMapping); // Get the "yes" branch and the "no" branch. The "no" branch is optional in syntax and is thus // somewhat likely to be Empty. RegexNode yesBranch = node.Child(0); RegexNode? noBranch = node.Child(1) is { Kind: not RegexNodeKind.Empty } childNo ? childNo : null; Label originalDoneLabel = doneLabel; Label refNotMatched = DefineLabel(); Label endConditional = DefineLabel(); // As with alternations, we have potentially multiple branches, each of which may contain // backtracking constructs, but the expression after the conditional needs a single target // to backtrack to. So, we expose a single Backtrack label and track which branch was // followed in this resumeAt local. LocalBuilder resumeAt = DeclareInt32(); // if (!base.IsMatched(capnum)) goto refNotMatched; Ldthis(); Ldc(capnum); Call(s_isMatchedMethod); BrfalseFar(refNotMatched); // The specified capture was captured. Run the "yes" branch. // If it successfully matches, jump to the end. EmitNode(yesBranch); TransferSliceStaticPosToPos(); Label postYesDoneLabel = doneLabel; if (!isAtomic && postYesDoneLabel != originalDoneLabel) { // resumeAt = 0; Ldc(0); Stloc(resumeAt); } bool needsEndConditional = postYesDoneLabel != originalDoneLabel || noBranch is not null; if (needsEndConditional) { // goto endConditional; BrFar(endConditional); } MarkLabel(refNotMatched); Label postNoDoneLabel = originalDoneLabel; if (noBranch is not null) { // Output the no branch. 
doneLabel = originalDoneLabel; EmitNode(noBranch); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch postNoDoneLabel = doneLabel; if (!isAtomic && postNoDoneLabel != originalDoneLabel) { // resumeAt = 1; Ldc(1); Stloc(resumeAt); } } else { // There's only a yes branch. If it's going to cause us to output a backtracking // label but code may not end up taking the yes branch path, we need to emit a resumeAt // that will cause the backtracking to immediately pass through this node. if (!isAtomic && postYesDoneLabel != originalDoneLabel) { // resumeAt = 2; Ldc(2); Stloc(resumeAt); } } if (isAtomic || (postYesDoneLabel == originalDoneLabel && postNoDoneLabel == originalDoneLabel)) { // We're atomic by our parent, so even if either child branch has backtracking constructs, // we don't need to emit any backtracking logic in support, as nothing will backtrack in. // Instead, we just ensure we revert back to the original done label so that any backtracking // skips over this node. doneLabel = originalDoneLabel; if (needsEndConditional) { MarkLabel(endConditional); } } else { // Subsequent expressions might try to backtrack to here, so output a backtracking map based on resumeAt. // Skip the backtracking section // goto endConditional; Debug.Assert(needsEndConditional); Br(endConditional); // Backtrack section Label backtrack = DefineLabel(); doneLabel = backtrack; MarkLabel(backtrack); // Pop from the stack the branch that was used and jump back to its backtracking location. // resumeAt = base.runstack[--stackpos]; EmitStackPop(); Stloc(resumeAt); if (postYesDoneLabel != originalDoneLabel) { // if (resumeAt == 0) goto postIfDoneLabel; Ldloc(resumeAt); Ldc(0); BeqFar(postYesDoneLabel); } if (postNoDoneLabel != originalDoneLabel) { // if (resumeAt == 1) goto postNoDoneLabel; Ldloc(resumeAt); Ldc(1); BeqFar(postNoDoneLabel); } // goto originalDoneLabel; BrFar(originalDoneLabel); if (needsEndConditional) { MarkLabel(endConditional); } // if (stackpos + 1 >= base.runstack.Length) Array.Resize(ref base.runstack, base.runstack.Length * 2); // base.runstack[stackpos++] = resumeAt; EmitStackResizeIfNeeded(1); EmitStackPush(() => Ldloc(resumeAt)); } } // Emits the code for an if(expression)-then-else conditional. void EmitExpressionConditional(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.ExpressionConditional, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 3, $"Expected 3 children, found {node.ChildCount()}"); bool isAtomic = analysis.IsAtomicByAncestor(node); // We're branching in a complicated fashion. Make sure sliceStaticPos is 0. TransferSliceStaticPosToPos(); // The first child node is the condition expression. If this matches, then we branch to the "yes" branch. // If it doesn't match, then we branch to the optional "no" branch if it exists, or simply skip the "yes" // branch, otherwise. The condition is treated as a positive lookahead. RegexNode condition = node.Child(0); // Get the "yes" branch and the "no" branch. The "no" branch is optional in syntax and is thus // somewhat likely to be Empty. RegexNode yesBranch = node.Child(1); RegexNode? noBranch = node.Child(2) is { Kind: not RegexNodeKind.Empty } childNo ? 
childNo : null; Label originalDoneLabel = doneLabel; Label expressionNotMatched = DefineLabel(); Label endConditional = DefineLabel(); // As with alternations, we have potentially multiple branches, each of which may contain // backtracking constructs, but the expression after the condition needs a single target // to backtrack to. So, we expose a single Backtrack label and track which branch was // followed in this resumeAt local. LocalBuilder? resumeAt = null; if (!isAtomic) { resumeAt = DeclareInt32(); } // If the condition expression has captures, we'll need to uncapture them in the case of no match. LocalBuilder? startingCapturePos = null; if (analysis.MayContainCapture(condition)) { // int startingCapturePos = base.Crawlpos(); startingCapturePos = DeclareInt32(); Ldthis(); Call(s_crawlposMethod); Stloc(startingCapturePos); } // Emit the condition expression. Route any failures to after the yes branch. This code is almost // the same as for a positive lookahead; however, a positive lookahead only needs to reset the position // on a successful match, as a failed match fails the whole expression; here, we need to reset the // position on completion, regardless of whether the match is successful or not. doneLabel = expressionNotMatched; // Save off pos. We'll need to reset this upon successful completion of the lookahead. // startingPos = pos; LocalBuilder startingPos = DeclareInt32(); Ldloc(pos); Stloc(startingPos); int startingSliceStaticPos = sliceStaticPos; // Emit the child. The condition expression is a zero-width assertion, which is atomic, // so prevent backtracking into it. EmitNode(condition); doneLabel = originalDoneLabel; // After the condition completes successfully, reset the text positions. // Do not reset captures, which persist beyond the lookahead. // pos = startingPos; // slice = inputSpan.Slice(pos); Ldloc(startingPos); Stloc(pos); SliceInputSpan(); sliceStaticPos = startingSliceStaticPos; // The expression matched. Run the "yes" branch. If it successfully matches, jump to the end. EmitNode(yesBranch); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch Label postYesDoneLabel = doneLabel; if (!isAtomic && postYesDoneLabel != originalDoneLabel) { // resumeAt = 0; Ldc(0); Stloc(resumeAt!); } // goto endConditional; BrFar(endConditional); // After the condition completes unsuccessfully, reset the text positions // _and_ reset captures, which should not persist when the whole expression failed. // pos = startingPos; MarkLabel(expressionNotMatched); Ldloc(startingPos); Stloc(pos); SliceInputSpan(); sliceStaticPos = startingSliceStaticPos; if (startingCapturePos is not null) { EmitUncaptureUntil(startingCapturePos); } Label postNoDoneLabel = originalDoneLabel; if (noBranch is not null) { // Output the no branch. doneLabel = originalDoneLabel; EmitNode(noBranch); TransferSliceStaticPosToPos(); // make sure sliceStaticPos is 0 after each branch postNoDoneLabel = doneLabel; if (!isAtomic && postNoDoneLabel != originalDoneLabel) { // resumeAt = 1; Ldc(1); Stloc(resumeAt!); } } else { // There's only a yes branch. If it's going to cause us to output a backtracking // label but code may not end up taking the yes branch path, we need to emit a resumeAt // that will cause the backtracking to immediately pass through this node. 
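// As a rough summary (a sketch only, using the labels and locals declared in this method), the
// backtracking dispatch emitted further below behaves like:
//   switch (resumeAt)
//   {
//       case 0: goto postYesDoneLabel;   // resume backtracking inside the "yes" branch
//       case 1: goto postNoDoneLabel;    // resume backtracking inside the "no" branch
//       default: goto originalDoneLabel; // nothing here can make progress; fail upward
//   }
// so a resumeAt of 2 simply routes backtracking straight past this conditional.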
if (!isAtomic && postYesDoneLabel != originalDoneLabel) { // resumeAt = 2; Ldc(2); Stloc(resumeAt!); } } // If either the yes branch or the no branch contained backtracking, subsequent expressions // might try to backtrack to here, so output a backtracking map based on resumeAt. if (isAtomic || (postYesDoneLabel == originalDoneLabel && postNoDoneLabel == originalDoneLabel)) { // EndConditional: doneLabel = originalDoneLabel; MarkLabel(endConditional); } else { Debug.Assert(resumeAt is not null); // Skip the backtracking section. BrFar(endConditional); Label backtrack = DefineLabel(); doneLabel = backtrack; MarkLabel(backtrack); // resumeAt = StackPop(); EmitStackPop(); Stloc(resumeAt); if (postYesDoneLabel != originalDoneLabel) { // if (resumeAt == 0) goto postYesDoneLabel; Ldloc(resumeAt); Ldc(0); BeqFar(postYesDoneLabel); } if (postNoDoneLabel != originalDoneLabel) { // if (resumeAt == 1) goto postNoDoneLabel; Ldloc(resumeAt); Ldc(1); BeqFar(postNoDoneLabel); } // goto postConditionalDoneLabel; BrFar(originalDoneLabel); // EndConditional: MarkLabel(endConditional); // if (stackpos + 1 >= base.runstack.Length) Array.Resize(ref base.runstack, base.runstack.Length * 2); // base.runstack[stackpos++] = resumeAt; EmitStackResizeIfNeeded(1); EmitStackPush(() => Ldloc(resumeAt!)); } } // Emits the code for a Capture node. void EmitCapture(RegexNode node, RegexNode? subsequent = null) { Debug.Assert(node.Kind is RegexNodeKind.Capture, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); int capnum = RegexParser.MapCaptureNumber(node.M, _regexTree!.CaptureNumberSparseMapping); int uncapnum = RegexParser.MapCaptureNumber(node.N, _regexTree.CaptureNumberSparseMapping); bool isAtomic = analysis.IsAtomicByAncestor(node); // pos += sliceStaticPos; // slice = slice.Slice(sliceStaticPos); // startingPos = pos; TransferSliceStaticPosToPos(); LocalBuilder startingPos = DeclareInt32(); Ldloc(pos); Stloc(startingPos); RegexNode child = node.Child(0); if (uncapnum != -1) { // if (!IsMatched(uncapnum)) goto doneLabel; Ldthis(); Ldc(uncapnum); Call(s_isMatchedMethod); BrfalseFar(doneLabel); } // Emit child node. Label originalDoneLabel = doneLabel; EmitNode(child, subsequent); bool childBacktracks = doneLabel != originalDoneLabel; // pos += sliceStaticPos; // slice = slice.Slice(sliceStaticPos); TransferSliceStaticPosToPos(); if (uncapnum == -1) { // Capture(capnum, startingPos, pos); Ldthis(); Ldc(capnum); Ldloc(startingPos); Ldloc(pos); Call(s_captureMethod); } else { // TransferCapture(capnum, uncapnum, startingPos, pos); Ldthis(); Ldc(capnum); Ldc(uncapnum); Ldloc(startingPos); Ldloc(pos); Call(s_transferCaptureMethod); } if (isAtomic || !childBacktracks) { // If the capture is atomic and nothing can backtrack into it, we're done. // Similarly, even if the capture isn't atomic, if the captured expression // doesn't do any backtracking, we're done. doneLabel = originalDoneLabel; } else { // We're not atomic and the child node backtracks. When it does, we need // to ensure that the starting position for the capture is appropriately // reset to what it was initially (it could have changed as part of being // in a loop or similar). So, we emit a backtracking section that // pushes/pops the starting position before falling through. 
// if (stackpos + 1 >= base.runstack.Length) Array.Resize(ref base.runstack, base.runstack.Length * 2); // base.runstack[stackpos++] = startingPos; EmitStackResizeIfNeeded(1); EmitStackPush(() => Ldloc(startingPos)); // Skip past the backtracking section // goto backtrackingEnd; Label backtrackingEnd = DefineLabel(); Br(backtrackingEnd); // Emit a backtracking section that restores the capture's state and then jumps to the previous done label Label backtrack = DefineLabel(); MarkLabel(backtrack); EmitStackPop(); Stloc(startingPos); if (!childBacktracks) { // pos = startingPos Ldloc(startingPos); Stloc(pos); SliceInputSpan(); } // goto doneLabel; BrFar(doneLabel); doneLabel = backtrack; MarkLabel(backtrackingEnd); } } // Emits code to unwind the capture stack until the crawl position specified in the provided local. void EmitUncaptureUntil(LocalBuilder startingCapturePos) { Debug.Assert(startingCapturePos != null); // while (base.Crawlpos() > startingCapturePos) base.Uncapture(); Label condition = DefineLabel(); Label body = DefineLabel(); Br(condition); MarkLabel(body); Ldthis(); Call(s_uncaptureMethod); MarkLabel(condition); Ldthis(); Call(s_crawlposMethod); Ldloc(startingCapturePos); Bgt(body); } // Emits the code to handle a positive lookahead assertion. void EmitPositiveLookaheadAssertion(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.PositiveLookaround, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); // Save off pos. We'll need to reset this upon successful completion of the lookahead. // startingPos = pos; LocalBuilder startingPos = DeclareInt32(); Ldloc(pos); Stloc(startingPos); int startingTextSpanPos = sliceStaticPos; // Emit the child. RegexNode child = node.Child(0); if (analysis.MayBacktrack(child)) { // Lookarounds are implicitly atomic, so we need to emit the node as atomic if it might backtrack. EmitAtomic(node, null); } else { EmitNode(child); } // After the child completes successfully, reset the text positions. // Do not reset captures, which persist beyond the lookahead. // pos = startingPos; // slice = inputSpan.Slice(pos); Ldloc(startingPos); Stloc(pos); SliceInputSpan(); sliceStaticPos = startingTextSpanPos; } // Emits the code to handle a negative lookahead assertion. void EmitNegativeLookaheadAssertion(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.NegativeLookaround, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); Label originalDoneLabel = doneLabel; // Save off pos. We'll need to reset this upon successful completion of the lookahead. // startingPos = pos; LocalBuilder startingPos = DeclareInt32(); Ldloc(pos); Stloc(startingPos); int startingTextSpanPos = sliceStaticPos; Label negativeLookaheadDoneLabel = DefineLabel(); doneLabel = negativeLookaheadDoneLabel; // Emit the child. RegexNode child = node.Child(0); if (analysis.MayBacktrack(child)) { // Lookarounds are implicitly atomic, so we need to emit the node as atomic if it might backtrack. EmitAtomic(node, null); } else { EmitNode(child); } // If the generated code ends up here, it matched the lookahead, which actually // means failure for a _negative_ lookahead, so we need to jump to the original done. // goto originalDoneLabel; BrFar(originalDoneLabel); // Failures (success for a negative lookahead) jump here. 
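// For reference, the code emitted for a negative lookaround as a whole is roughly equivalent to
// the following sketch (names correspond to the locals and labels used in this method):
//   int startingPos = pos;
//   ...child...                 // any failure in the child jumps to negativeLookaheadDoneLabel
//   goto originalDoneLabel;     // the child matched, so the negative lookaround fails
//   negativeLookaheadDoneLabel:
//   pos = startingPos;          // the child failed, so the negative lookaround succeeds
//   slice = inputSpan.Slice(pos);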
MarkLabel(negativeLookaheadDoneLabel); if (doneLabel == negativeLookaheadDoneLabel) { doneLabel = originalDoneLabel; } // After the child completes in failure (success for negative lookahead), reset the text positions. // pos = startingPos; Ldloc(startingPos); Stloc(pos); SliceInputSpan(); sliceStaticPos = startingTextSpanPos; doneLabel = originalDoneLabel; } // Emits the code for the node. void EmitNode(RegexNode node, RegexNode? subsequent = null, bool emitLengthChecksIfRequired = true) { if (!StackHelper.TryEnsureSufficientExecutionStack()) { StackHelper.CallOnEmptyStack(EmitNode, node, subsequent, emitLengthChecksIfRequired); return; } switch (node.Kind) { case RegexNodeKind.Beginning: case RegexNodeKind.Start: case RegexNodeKind.Bol: case RegexNodeKind.Eol: case RegexNodeKind.End: case RegexNodeKind.EndZ: EmitAnchors(node); break; case RegexNodeKind.Boundary: case RegexNodeKind.NonBoundary: case RegexNodeKind.ECMABoundary: case RegexNodeKind.NonECMABoundary: EmitBoundary(node); break; case RegexNodeKind.Multi: EmitMultiChar(node, emitLengthChecksIfRequired); break; case RegexNodeKind.One: case RegexNodeKind.Notone: case RegexNodeKind.Set: EmitSingleChar(node, emitLengthChecksIfRequired); break; case RegexNodeKind.Oneloop: case RegexNodeKind.Notoneloop: case RegexNodeKind.Setloop: EmitSingleCharLoop(node, subsequent, emitLengthChecksIfRequired); break; case RegexNodeKind.Onelazy: case RegexNodeKind.Notonelazy: case RegexNodeKind.Setlazy: EmitSingleCharLazy(node, subsequent, emitLengthChecksIfRequired); break; case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Setloopatomic: EmitSingleCharAtomicLoop(node); break; case RegexNodeKind.Loop: EmitLoop(node); break; case RegexNodeKind.Lazyloop: EmitLazy(node); break; case RegexNodeKind.Alternate: EmitAlternation(node); break; case RegexNodeKind.Concatenate: EmitConcatenation(node, subsequent, emitLengthChecksIfRequired); break; case RegexNodeKind.Atomic: EmitAtomic(node, subsequent); break; case RegexNodeKind.Backreference: EmitBackreference(node); break; case RegexNodeKind.BackreferenceConditional: EmitBackreferenceConditional(node); break; case RegexNodeKind.ExpressionConditional: EmitExpressionConditional(node); break; case RegexNodeKind.Capture: EmitCapture(node, subsequent); break; case RegexNodeKind.PositiveLookaround: EmitPositiveLookaheadAssertion(node); break; case RegexNodeKind.NegativeLookaround: EmitNegativeLookaheadAssertion(node); break; case RegexNodeKind.Nothing: BrFar(doneLabel); break; case RegexNodeKind.Empty: // Emit nothing. break; case RegexNodeKind.UpdateBumpalong: EmitUpdateBumpalong(node); break; default: Debug.Fail($"Unexpected node type: {node.Kind}"); break; } } // Emits the node for an atomic. void EmitAtomic(RegexNode node, RegexNode? subsequent) { Debug.Assert(node.Kind is RegexNodeKind.Atomic or RegexNodeKind.PositiveLookaround or RegexNodeKind.NegativeLookaround, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); RegexNode child = node.Child(0); if (!analysis.MayBacktrack(child)) { // If the child has no backtracking, the atomic is a nop and we can just skip it. // Note that the source generator equivalent for this is in the top-level EmitNode, in order to avoid // outputting some extra comments and scopes. As such formatting isn't a concern for the compiler, // the logic is instead here in EmitAtomic. 
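// (For example, with the hypothetical pattern "(?>abc)", the child has no backtracking, so the
// atomic wrapper adds nothing and the child is emitted exactly as a bare "abc" would be.)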
EmitNode(child, subsequent); return; } // Grab the current done label and the current backtracking position. The purpose of the atomic node // is to ensure that nodes after it that might backtrack skip over the atomic, which means after // rendering the atomic's child, we need to reset the label so that subsequent backtracking doesn't // see any label left set by the atomic's child. We also need to reset the backtracking stack position // so that the state on the stack remains consistent. Label originalDoneLabel = doneLabel; // int startingStackpos = stackpos; using RentedLocalBuilder startingStackpos = RentInt32Local(); Ldloc(stackpos); Stloc(startingStackpos); // Emit the child. EmitNode(child, subsequent); // Reset the stack position and done label. // stackpos = startingStackpos; Ldloc(startingStackpos); Stloc(stackpos); doneLabel = originalDoneLabel; } // Emits the code to handle updating base.runtextpos to pos in response to // an UpdateBumpalong node. This is used when we want to inform the scan loop that // it should bump from this location rather than from the original location. void EmitUpdateBumpalong(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.UpdateBumpalong, $"Unexpected type: {node.Kind}"); // if (base.runtextpos < pos) // { // base.runtextpos = pos; // } TransferSliceStaticPosToPos(); Ldthisfld(s_runtextposField); Ldloc(pos); Label skipUpdate = DefineLabel(); Bge(skipUpdate); Ldthis(); Ldloc(pos); Stfld(s_runtextposField); MarkLabel(skipUpdate); } // Emits code for a concatenation void EmitConcatenation(RegexNode node, RegexNode? subsequent, bool emitLengthChecksIfRequired) { Debug.Assert(node.Kind is RegexNodeKind.Concatenate, $"Unexpected type: {node.Kind}"); Debug.Assert(node.ChildCount() >= 2, $"Expected at least 2 children, found {node.ChildCount()}"); // Emit the code for each child one after the other. int childCount = node.ChildCount(); for (int i = 0; i < childCount; i++) { // If we can find a subsequence of fixed-length children, we can emit a length check once for that sequence // and then skip the individual length checks for each. if (emitLengthChecksIfRequired && node.TryGetJoinableLengthCheckChildRange(i, out int requiredLength, out int exclusiveEnd)) { EmitSpanLengthCheck(requiredLength); for (; i < exclusiveEnd; i++) { EmitNode(node.Child(i), GetSubsequent(i, node, subsequent), emitLengthChecksIfRequired: false); } i--; continue; } EmitNode(node.Child(i), GetSubsequent(i, node, subsequent)); } // Gets the node to treat as the subsequent one to node.Child(index) static RegexNode? GetSubsequent(int index, RegexNode node, RegexNode? subsequent) { int childCount = node.ChildCount(); for (int i = index + 1; i < childCount; i++) { RegexNode next = node.Child(i); if (next.Kind is not RegexNodeKind.UpdateBumpalong) // skip node types that don't have a semantic impact { return next; } } return subsequent; } } // Emits the code to handle a single-character match. void EmitSingleChar(RegexNode node, bool emitLengthCheck = true, LocalBuilder? offset = null) { Debug.Assert(node.IsOneFamily || node.IsNotoneFamily || node.IsSetFamily, $"Unexpected type: {node.Kind}"); // This only emits a single check, but it's called from the looping constructs in a loop // to generate the code for a single check, so we check for each "family" (one, notone, set) // rather than only for the specific single character nodes. 
// if ((uint)(sliceStaticPos + offset) >= slice.Length || slice[sliceStaticPos + offset] != ch) goto Done; if (emitLengthCheck) { EmitSpanLengthCheck(1, offset); } Ldloca(slice); EmitSum(sliceStaticPos, offset); Call(s_spanGetItemMethod); LdindU2(); if (node.IsSetFamily) { EmitMatchCharacterClass(node.Str!, IsCaseInsensitive(node)); BrfalseFar(doneLabel); } else { if (IsCaseInsensitive(node)) { CallToLower(); } Ldc(node.Ch); if (node.IsOneFamily) { BneFar(doneLabel); } else // IsNotoneFamily { BeqFar(doneLabel); } } sliceStaticPos++; } // Emits the code to handle a boundary check on a character. void EmitBoundary(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Boundary or RegexNodeKind.NonBoundary or RegexNodeKind.ECMABoundary or RegexNodeKind.NonECMABoundary, $"Unexpected type: {node.Kind}"); // if (!IsBoundary(inputSpan, pos + sliceStaticPos)) goto doneLabel; Ldthis(); Ldloc(inputSpan); Ldloc(pos); if (sliceStaticPos > 0) { Ldc(sliceStaticPos); Add(); } switch (node.Kind) { case RegexNodeKind.Boundary: Call(s_isBoundaryMethod); BrfalseFar(doneLabel); break; case RegexNodeKind.NonBoundary: Call(s_isBoundaryMethod); BrtrueFar(doneLabel); break; case RegexNodeKind.ECMABoundary: Call(s_isECMABoundaryMethod); BrfalseFar(doneLabel); break; default: Debug.Assert(node.Kind == RegexNodeKind.NonECMABoundary); Call(s_isECMABoundaryMethod); BrtrueFar(doneLabel); break; } } // Emits the code to handle various anchors. void EmitAnchors(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Beginning or RegexNodeKind.Start or RegexNodeKind.Bol or RegexNodeKind.End or RegexNodeKind.EndZ or RegexNodeKind.Eol, $"Unexpected type: {node.Kind}"); Debug.Assert(sliceStaticPos >= 0); switch (node.Kind) { case RegexNodeKind.Beginning: case RegexNodeKind.Start: if (sliceStaticPos > 0) { // If we statically know we've already matched part of the regex, there's no way we're at the // beginning or start, as we've already progressed past it. 
BrFar(doneLabel); } else { // if (pos > 0/start) goto doneLabel; Ldloc(pos); if (node.Kind == RegexNodeKind.Beginning) { Ldc(0); } else { Ldthisfld(s_runtextstartField); } BneFar(doneLabel); } break; case RegexNodeKind.Bol: if (sliceStaticPos > 0) { // if (slice[sliceStaticPos - 1] != '\n') goto doneLabel; Ldloca(slice); Ldc(sliceStaticPos - 1); Call(s_spanGetItemMethod); LdindU2(); Ldc('\n'); BneFar(doneLabel); } else { // We can't use our slice in this case, because we'd need to access slice[-1], so we access the runtext field directly: // if (pos > 0 && base.runtext[pos - 1] != '\n') goto doneLabel; Label success = DefineLabel(); Ldloc(pos); Ldc(0); Ble(success); Ldloca(inputSpan); Ldloc(pos); Ldc(1); Sub(); Call(s_spanGetItemMethod); LdindU2(); Ldc('\n'); BneFar(doneLabel); MarkLabel(success); } break; case RegexNodeKind.End: // if (sliceStaticPos < slice.Length) goto doneLabel; Ldc(sliceStaticPos); Ldloca(slice); Call(s_spanGetLengthMethod); BltUnFar(doneLabel); break; case RegexNodeKind.EndZ: // if (sliceStaticPos < slice.Length - 1) goto doneLabel; Ldc(sliceStaticPos); Ldloca(slice); Call(s_spanGetLengthMethod); Ldc(1); Sub(); BltFar(doneLabel); goto case RegexNodeKind.Eol; case RegexNodeKind.Eol: // if (sliceStaticPos < slice.Length && slice[sliceStaticPos] != '\n') goto doneLabel; { Label success = DefineLabel(); Ldc(sliceStaticPos); Ldloca(slice); Call(s_spanGetLengthMethod); BgeUn(success); Ldloca(slice); Ldc(sliceStaticPos); Call(s_spanGetItemMethod); LdindU2(); Ldc('\n'); BneFar(doneLabel); MarkLabel(success); } break; } } // Emits the code to handle a multiple-character match. void EmitMultiChar(RegexNode node, bool emitLengthCheck) { Debug.Assert(node.Kind is RegexNodeKind.Multi, $"Unexpected type: {node.Kind}"); EmitMultiCharString(node.Str!, IsCaseInsensitive(node), emitLengthCheck); } void EmitMultiCharString(string str, bool caseInsensitive, bool emitLengthCheck) { Debug.Assert(str.Length >= 2); if (caseInsensitive) // StartsWith(..., XxIgnoreCase) won't necessarily be the same as char-by-char comparison { // This case should be relatively rare. It will only occur with IgnoreCase and a series of non-ASCII characters. if (emitLengthCheck) { EmitSpanLengthCheck(str.Length); } foreach (char c in str) { // if (c != slice[sliceStaticPos++]) goto doneLabel; EmitTextSpanOffset(); sliceStaticPos++; LdindU2(); CallToLower(); Ldc(c); BneFar(doneLabel); } } else { // if (!slice.Slice(sliceStaticPos).StartsWith("...") goto doneLabel; Ldloca(slice); Ldc(sliceStaticPos); Call(s_spanSliceIntMethod); Ldstr(str); Call(s_stringAsSpanMethod); Call(s_spanStartsWith); BrfalseFar(doneLabel); sliceStaticPos += str.Length; } } // Emits the code to handle a backtracking, single-character loop. void EmitSingleCharLoop(RegexNode node, RegexNode? subsequent = null, bool emitLengthChecksIfRequired = true) { Debug.Assert(node.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop, $"Unexpected type: {node.Kind}"); // If this is actually atomic based on its parent, emit it as atomic instead; no backtracking necessary. if (analysis.IsAtomicByAncestor(node)) { EmitSingleCharAtomicLoop(node); return; } // If this is actually a repeater, emit that instead; no backtracking necessary. if (node.M == node.N) { EmitSingleCharRepeater(node, emitLengthChecksIfRequired); return; } // Emit backtracking around an atomic single char loop. 
We can then implement the backtracking // as an afterthought, since we know exactly how many characters are accepted by each iteration // of the wrapped loop (1) and that there's nothing captured by the loop. Debug.Assert(node.M < node.N); Label backtrackingLabel = DefineLabel(); Label endLoop = DefineLabel(); LocalBuilder startingPos = DeclareInt32(); LocalBuilder endingPos = DeclareInt32(); LocalBuilder? capturepos = expressionHasCaptures ? DeclareInt32() : null; // We're about to enter a loop, so ensure our text position is 0. TransferSliceStaticPosToPos(); // Grab the current position, then emit the loop as atomic, and then // grab the current position again. Even though we emit the loop without // knowledge of backtracking, we can layer it on top by just walking back // through the individual characters (a benefit of the loop matching exactly // one character per iteration, no possible captures within the loop, etc.) // int startingPos = pos; Ldloc(pos); Stloc(startingPos); EmitSingleCharAtomicLoop(node); // pos += sliceStaticPos; // int endingPos = pos; TransferSliceStaticPosToPos(); Ldloc(pos); Stloc(endingPos); // int capturepos = base.Crawlpos(); if (capturepos is not null) { Ldthis(); Call(s_crawlposMethod); Stloc(capturepos); } // startingPos += node.M; if (node.M > 0) { Ldloc(startingPos); Ldc(node.M); Add(); Stloc(startingPos); } // goto endLoop; BrFar(endLoop); // Backtracking section. Subsequent failures will jump to here, at which // point we decrement the matched count as long as it's above the minimum // required, and try again by flowing to everything that comes after this. MarkLabel(backtrackingLabel); if (capturepos is not null) { // capturepos = base.runstack[--stackpos]; // while (base.Crawlpos() > capturepos) base.Uncapture(); EmitStackPop(); Stloc(capturepos); EmitUncaptureUntil(capturepos); } // endingPos = base.runstack[--stackpos]; // startingPos = base.runstack[--stackpos]; EmitStackPop(); Stloc(endingPos); EmitStackPop(); Stloc(startingPos); // if (startingPos >= endingPos) goto doneLabel; Ldloc(startingPos); Ldloc(endingPos); BgeFar(doneLabel); if (subsequent?.FindStartingLiteral() is ValueTuple<char, string?, string?> literal) { // endingPos = inputSpan.Slice(startingPos, Math.Min(inputSpan.Length, endingPos + literal.Length - 1) - startingPos).LastIndexOf(literal); // if (endingPos < 0) // { // goto doneLabel; // } Ldloca(inputSpan); Ldloc(startingPos); if (literal.Item2 is not null) { Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldloc(endingPos); Ldc(literal.Item2.Length - 1); Add(); Call(s_mathMinIntInt); Ldloc(startingPos); Sub(); Call(s_spanSliceIntIntMethod); Ldstr(literal.Item2); Call(s_stringAsSpanMethod); Call(s_spanLastIndexOfSpan); } else { Ldloc(endingPos); Ldloc(startingPos); Sub(); Call(s_spanSliceIntIntMethod); if (literal.Item3 is not null) { switch (literal.Item3.Length) { case 2: Ldc(literal.Item3[0]); Ldc(literal.Item3[1]); Call(s_spanLastIndexOfAnyCharChar); break; case 3: Ldc(literal.Item3[0]); Ldc(literal.Item3[1]); Ldc(literal.Item3[2]); Call(s_spanLastIndexOfAnyCharCharChar); break; default: Ldstr(literal.Item3); Call(s_stringAsSpanMethod); Call(s_spanLastIndexOfAnySpan); break; } } else { Ldc(literal.Item1); Call(s_spanLastIndexOfChar); } } Stloc(endingPos); Ldloc(endingPos); Ldc(0); BltFar(doneLabel); // endingPos += startingPos; Ldloc(endingPos); Ldloc(startingPos); Add(); Stloc(endingPos); } else { // endingPos--; Ldloc(endingPos); Ldc(1); Sub(); Stloc(endingPos); } // pos = endingPos; Ldloc(endingPos); Stloc(pos); // slice = 
inputSpan.Slice(pos); SliceInputSpan(); MarkLabel(endLoop); EmitStackResizeIfNeeded(expressionHasCaptures ? 3 : 2); EmitStackPush(() => Ldloc(startingPos)); EmitStackPush(() => Ldloc(endingPos)); if (capturepos is not null) { EmitStackPush(() => Ldloc(capturepos!)); } doneLabel = backtrackingLabel; // leave set to the backtracking label for all subsequent nodes } void EmitSingleCharLazy(RegexNode node, RegexNode? subsequent = null, bool emitLengthChecksIfRequired = true) { Debug.Assert(node.Kind is RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy, $"Unexpected type: {node.Kind}"); // Emit the min iterations as a repeater. Any failures here don't necessitate backtracking, // as the lazy itself failed to match, and there's no backtracking possible by the individual // characters/iterations themselves. if (node.M > 0) { EmitSingleCharRepeater(node, emitLengthChecksIfRequired); } // If the whole thing was actually that repeater, we're done. Similarly, if this is actually an atomic // lazy loop, nothing will ever backtrack into this node, so we never need to iterate more than the minimum. if (node.M == node.N || analysis.IsAtomicByAncestor(node)) { return; } Debug.Assert(node.M < node.N); // We now need to match one character at a time, each time allowing the remainder of the expression // to try to match, and only matching another character if the subsequent expression fails to match. // We're about to enter a loop, so ensure our text position is 0. TransferSliceStaticPosToPos(); // If the loop isn't unbounded, track the number of iterations and the max number to allow. LocalBuilder? iterationCount = null; int? maxIterations = null; if (node.N != int.MaxValue) { maxIterations = node.N - node.M; // int iterationCount = 0; iterationCount = DeclareInt32(); Ldc(0); Stloc(iterationCount); } // Track the current crawl position. Upon backtracking, we'll unwind any captures beyond this point. LocalBuilder? capturepos = expressionHasCaptures ? DeclareInt32() : null; // Track the current pos. Each time we backtrack, we'll reset to the stored position, which // is also incremented each time we match another character in the loop. // int startingPos = pos; LocalBuilder startingPos = DeclareInt32(); Ldloc(pos); Stloc(startingPos); // Skip the backtracking section for the initial subsequent matching. We've already matched the // minimum number of iterations, which means we can successfully match with zero additional iterations. // goto endLoopLabel; Label endLoopLabel = DefineLabel(); BrFar(endLoopLabel); // Backtracking section. Subsequent failures will jump to here. Label backtrackingLabel = DefineLabel(); MarkLabel(backtrackingLabel); // Uncapture any captures if the expression has any. It's possible the captures it has // are before this node, in which case this is wasted effort, but still functionally correct. if (capturepos is not null) { // while (base.Crawlpos() > capturepos) base.Uncapture(); EmitUncaptureUntil(capturepos); } // If there's a max number of iterations, see if we've exceeded the maximum number of characters // to match. If we haven't, increment the iteration count. if (maxIterations is not null) { // if (iterationCount >= maxIterations) goto doneLabel; Ldloc(iterationCount!); Ldc(maxIterations.Value); BgeFar(doneLabel); // iterationCount++; Ldloc(iterationCount!); Ldc(1); Add(); Stloc(iterationCount!); } // Now match the next item in the lazy loop. 
We need to reset the pos to the position // just after the last character in this loop was matched, and we need to store the resulting position // for the next time we backtrack. // pos = startingPos; // Match single char; Ldloc(startingPos); Stloc(pos); SliceInputSpan(); EmitSingleChar(node); TransferSliceStaticPosToPos(); // Now that we've appropriately advanced by one character and are set for what comes after the loop, // see if we can skip ahead more iterations by doing a search for a following literal. if (iterationCount is null && node.Kind is RegexNodeKind.Notonelazy && !IsCaseInsensitive(node) && subsequent?.FindStartingLiteral(4) is ValueTuple<char, string?, string?> literal && // 5 == max optimized by IndexOfAny, and we need to reserve 1 for node.Ch (literal.Item3 is not null ? !literal.Item3.Contains(node.Ch) : (literal.Item2?[0] ?? literal.Item1) != node.Ch)) // no overlap between node.Ch and the start of the literal { // e.g. "<[^>]*?>" // This lazy loop will consume all characters other than node.Ch until the subsequent literal. // We can implement it to search for either that char or the literal, whichever comes first. // If it ends up being that node.Ch, the loop fails (we're only here if we're backtracking). // startingPos = slice.IndexOfAny(node.Ch, literal); Ldloc(slice); if (literal.Item3 is not null) { switch (literal.Item3.Length) { case 2: Ldc(node.Ch); Ldc(literal.Item3[0]); Ldc(literal.Item3[1]); Call(s_spanIndexOfAnyCharCharChar); break; default: Ldstr(node.Ch + literal.Item3); Call(s_stringAsSpanMethod); Call(s_spanIndexOfAnySpan); break; } } else { Ldc(node.Ch); Ldc(literal.Item2?[0] ?? literal.Item1); Call(s_spanIndexOfAnyCharChar); } Stloc(startingPos); // if ((uint)startingPos >= (uint)slice.Length) goto doneLabel; Ldloc(startingPos); Ldloca(slice); Call(s_spanGetLengthMethod); BgeUnFar(doneLabel); // if (slice[startingPos] == node.Ch) goto doneLabel; Ldloca(slice); Ldloc(startingPos); Call(s_spanGetItemMethod); LdindU2(); Ldc(node.Ch); BeqFar(doneLabel); // pos += startingPos; // slice = inputSpace.Slice(pos); Ldloc(pos); Ldloc(startingPos); Add(); Stloc(pos); SliceInputSpan(); } else if (iterationCount is null && node.Kind is RegexNodeKind.Setlazy && node.Str == RegexCharClass.AnyClass && subsequent?.FindStartingLiteral() is ValueTuple<char, string?, string?> literal2) { // e.g. ".*?string" with RegexOptions.Singleline // This lazy loop will consume all characters until the subsequent literal. If the subsequent literal // isn't found, the loop fails. We can implement it to just search for that literal. // startingPos = slice.IndexOf(literal); Ldloc(slice); if (literal2.Item2 is not null) { Ldstr(literal2.Item2); Call(s_stringAsSpanMethod); Call(s_spanIndexOfSpan); } else if (literal2.Item3 is not null) { switch (literal2.Item3.Length) { case 2: Ldc(literal2.Item3[0]); Ldc(literal2.Item3[1]); Call(s_spanIndexOfAnyCharChar); break; case 3: Ldc(literal2.Item3[0]); Ldc(literal2.Item3[1]); Ldc(literal2.Item3[2]); Call(s_spanIndexOfAnyCharCharChar); break; default: Ldstr(literal2.Item3); Call(s_stringAsSpanMethod); Call(s_spanIndexOfAnySpan); break; } } else { Ldc(literal2.Item1); Call(s_spanIndexOfChar); } Stloc(startingPos); // if (startingPos < 0) goto doneLabel; Ldloc(startingPos); Ldc(0); BltFar(doneLabel); // pos += startingPos; // slice = inputSpace.Slice(pos); Ldloc(pos); Ldloc(startingPos); Add(); Stloc(pos); SliceInputSpan(); } // Store the position we've left off at in case we need to iterate again. 
// startingPos = pos; Ldloc(pos); Stloc(startingPos); // Update the done label for everything that comes after this node. This is done after we emit the single char // matching, as that failing indicates the loop itself has failed to match. Label originalDoneLabel = doneLabel; doneLabel = backtrackingLabel; // leave set to the backtracking label for all subsequent nodes MarkLabel(endLoopLabel); if (capturepos is not null) { // capturepos = base.CrawlPos(); Ldthis(); Call(s_crawlposMethod); Stloc(capturepos); } if (node.IsInLoop()) { // Store the loop's state // base.runstack[stackpos++] = startingPos; // base.runstack[stackpos++] = capturepos; // base.runstack[stackpos++] = iterationCount; EmitStackResizeIfNeeded(3); EmitStackPush(() => Ldloc(startingPos)); if (capturepos is not null) { EmitStackPush(() => Ldloc(capturepos)); } if (iterationCount is not null) { EmitStackPush(() => Ldloc(iterationCount)); } // Skip past the backtracking section Label backtrackingEnd = DefineLabel(); BrFar(backtrackingEnd); // Emit a backtracking section that restores the loop's state and then jumps to the previous done label Label backtrack = DefineLabel(); MarkLabel(backtrack); // iterationCount = base.runstack[--stackpos]; // capturepos = base.runstack[--stackpos]; // startingPos = base.runstack[--stackpos]; if (iterationCount is not null) { EmitStackPop(); Stloc(iterationCount); } if (capturepos is not null) { EmitStackPop(); Stloc(capturepos); } EmitStackPop(); Stloc(startingPos); // goto doneLabel; BrFar(doneLabel); doneLabel = backtrack; MarkLabel(backtrackingEnd); } } void EmitLazy(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Lazyloop, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M < int.MaxValue, $"Unexpected M={node.M}"); Debug.Assert(node.N >= node.M, $"Unexpected M={node.M}, N={node.N}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); int minIterations = node.M; int maxIterations = node.N; Label originalDoneLabel = doneLabel; bool isAtomic = analysis.IsAtomicByAncestor(node); // If this is actually an atomic lazy loop, we need to output just the minimum number of iterations, // as nothing will backtrack into the lazy loop to get it progress further. if (isAtomic) { switch (minIterations) { case 0: // Atomic lazy with a min count of 0: nop. return; case 1: // Atomic lazy with a min count of 1: just output the child, no looping required. EmitNode(node.Child(0)); return; } } // If this is actually a repeater and the child doesn't have any backtracking in it that might // cause us to need to unwind already taken iterations, just output it as a repeater loop. if (minIterations == maxIterations && !analysis.MayBacktrack(node.Child(0))) { EmitNonBacktrackingRepeater(node); return; } // We might loop any number of times. In order to ensure this loop and subsequent code sees sliceStaticPos // the same regardless, we always need it to contain the same value, and the easiest such value is 0. // So, we transfer sliceStaticPos to pos, and ensure that any path out of here has sliceStaticPos as 0. TransferSliceStaticPosToPos(); LocalBuilder startingPos = DeclareInt32(); LocalBuilder iterationCount = DeclareInt32(); LocalBuilder sawEmpty = DeclareInt32(); Label body = DefineLabel(); Label endLoop = DefineLabel(); // iterationCount = 0; // startingPos = pos; // sawEmpty = 0; // false Ldc(0); Stloc(iterationCount); Ldloc(pos); Stloc(startingPos); Ldc(0); Stloc(sawEmpty); // If the min count is 0, start out by jumping right to what's after the loop. 
Backtracking // will then bring us back in to do further iterations. if (minIterations == 0) { // goto endLoop; BrFar(endLoop); } // Iteration body MarkLabel(body); EmitTimeoutCheck(); // We need to store the starting pos and crawl position so that it may // be backtracked through later. This needs to be the starting position from // the iteration we're leaving, so it's pushed before updating it to pos. // base.runstack[stackpos++] = base.Crawlpos(); // base.runstack[stackpos++] = startingPos; // base.runstack[stackpos++] = pos; // base.runstack[stackpos++] = sawEmpty; EmitStackResizeIfNeeded(3); if (expressionHasCaptures) { EmitStackPush(() => { Ldthis(); Call(s_crawlposMethod); }); } EmitStackPush(() => Ldloc(startingPos)); EmitStackPush(() => Ldloc(pos)); EmitStackPush(() => Ldloc(sawEmpty)); // Save off some state. We need to store the current pos so we can compare it against // pos after the iteration, in order to determine whether the iteration was empty. Empty // iterations are allowed as part of min matches, but once we've met the min quote, empty matches // are considered match failures. // startingPos = pos; Ldloc(pos); Stloc(startingPos); // Proactively increase the number of iterations. We do this prior to the match rather than once // we know it's successful, because we need to decrement it as part of a failed match when // backtracking; it's thus simpler to just always decrement it as part of a failed match, even // when initially greedily matching the loop, which then requires we increment it before trying. // iterationCount++; Ldloc(iterationCount); Ldc(1); Add(); Stloc(iterationCount); // Last but not least, we need to set the doneLabel that a failed match of the body will jump to. // Such an iteration match failure may or may not fail the whole operation, depending on whether // we've already matched the minimum required iterations, so we need to jump to a location that // will make that determination. Label iterationFailedLabel = DefineLabel(); doneLabel = iterationFailedLabel; // Finally, emit the child. Debug.Assert(sliceStaticPos == 0); EmitNode(node.Child(0)); TransferSliceStaticPosToPos(); // ensure sliceStaticPos remains 0 if (doneLabel == iterationFailedLabel) { doneLabel = originalDoneLabel; } // Loop condition. Continue iterating if we've not yet reached the minimum. if (minIterations > 0) { // if (iterationCount < minIterations) goto body; Ldloc(iterationCount); Ldc(minIterations); BltFar(body); } // If the last iteration was empty, we need to prevent further iteration from this point // unless we backtrack out of this iteration. We can do that easily just by pretending // we reached the max iteration count. // if (pos == startingPos) sawEmpty = 1; // true Label skipSawEmptySet = DefineLabel(); Ldloc(pos); Ldloc(startingPos); Bne(skipSawEmptySet); Ldc(1); Stloc(sawEmpty); MarkLabel(skipSawEmptySet); // We matched the next iteration. Jump to the subsequent code. // goto endLoop; BrFar(endLoop); // Now handle what happens when an iteration fails. We need to reset state to what it was before just that iteration // started. That includes resetting pos and clearing out any captures from that iteration. 
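// Taken together, the iteration body emitted above corresponds roughly to this sketch (names
// match the locals above; the Crawlpos push only happens when the expression has captures):
//   LoopBody:
//   base.runstack[stackpos++] = base.Crawlpos();
//   base.runstack[stackpos++] = startingPos;
//   base.runstack[stackpos++] = pos;
//   base.runstack[stackpos++] = sawEmpty;
//   startingPos = pos;
//   iterationCount++;
//   ...child...                 // failure jumps to iterationFailedLabel below
//   if (iterationCount < minIterations) goto LoopBody;
//   if (pos == startingPos) sawEmpty = 1;
//   goto endLoop;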
MarkLabel(iterationFailedLabel); // iterationCount--; Ldloc(iterationCount); Ldc(1); Sub(); Stloc(iterationCount); // if (iterationCount < 0) goto originalDoneLabel; Ldloc(iterationCount); Ldc(0); BltFar(originalDoneLabel); // sawEmpty = base.runstack[--stackpos]; // pos = base.runstack[--stackpos]; // startingPos = base.runstack[--stackpos]; // capturepos = base.runstack[--stackpos]; // while (base.Crawlpos() > capturepos) base.Uncapture(); EmitStackPop(); Stloc(sawEmpty); EmitStackPop(); Stloc(pos); EmitStackPop(); Stloc(startingPos); if (expressionHasCaptures) { using RentedLocalBuilder poppedCrawlPos = RentInt32Local(); EmitStackPop(); Stloc(poppedCrawlPos); EmitUncaptureUntil(poppedCrawlPos); } SliceInputSpan(); if (doneLabel == originalDoneLabel) { // goto originalDoneLabel; BrFar(originalDoneLabel); } else { // if (iterationCount == 0) goto originalDoneLabel; // goto doneLabel; Ldloc(iterationCount); Ldc(0); BeqFar(originalDoneLabel); BrFar(doneLabel); } MarkLabel(endLoop); if (!isAtomic) { // Store the capture's state and skip the backtracking section EmitStackResizeIfNeeded(3); EmitStackPush(() => Ldloc(startingPos)); EmitStackPush(() => Ldloc(iterationCount)); EmitStackPush(() => Ldloc(sawEmpty)); Label skipBacktrack = DefineLabel(); BrFar(skipBacktrack); // Emit a backtracking section that restores the capture's state and then jumps to the previous done label Label backtrack = DefineLabel(); MarkLabel(backtrack); // sawEmpty = base.runstack[--stackpos]; // iterationCount = base.runstack[--stackpos]; // startingPos = base.runstack[--stackpos]; EmitStackPop(); Stloc(sawEmpty); EmitStackPop(); Stloc(iterationCount); EmitStackPop(); Stloc(startingPos); if (maxIterations == int.MaxValue) { // if (sawEmpty != 0) goto doneLabel; Ldloc(sawEmpty); Ldc(0); BneFar(doneLabel); } else { // if (iterationCount >= maxIterations || sawEmpty != 0) goto doneLabel; Ldloc(iterationCount); Ldc(maxIterations); BgeFar(doneLabel); Ldloc(sawEmpty); Ldc(0); BneFar(doneLabel); } // goto body; BrFar(body); doneLabel = backtrack; MarkLabel(skipBacktrack); } } // Emits the code to handle a loop (repeater) with a fixed number of iterations. // RegexNode.M is used for the number of iterations (RegexNode.N is ignored), as this // might be used to implement the required iterations of other kinds of loops. void EmitSingleCharRepeater(RegexNode node, bool emitLengthChecksIfRequired = true) { Debug.Assert(node.IsOneFamily || node.IsNotoneFamily || node.IsSetFamily, $"Unexpected type: {node.Kind}"); int iterations = node.M; switch (iterations) { case 0: // No iterations, nothing to do. return; case 1: // Just match the individual item EmitSingleChar(node, emitLengthChecksIfRequired); return; case <= RegexNode.MultiVsRepeaterLimit when node.IsOneFamily && !IsCaseInsensitive(node): // This is a repeated case-sensitive character; emit it as a multi in order to get all the optimizations // afforded to a multi, e.g. unrolling the loop with multi-char reads/comparisons at a time. EmitMultiCharString(new string(node.Ch, iterations), caseInsensitive: false, emitLengthChecksIfRequired); return; } // if ((uint)(sliceStaticPos + iterations - 1) >= (uint)slice.Length) goto doneLabel; if (emitLengthChecksIfRequired) { EmitSpanLengthCheck(iterations); } // Arbitrary limit for unrolling vs creating a loop. We want to balance size in the generated // code with other costs, like the (small) overhead of slicing to create the temp span to iterate. 
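// (For example, a hypothetical "[ab]{8}" takes the unrolled path below, while "[ab]{32}" falls into
// the span-based loop; repeaters of a single literal character short enough to become a Multi are
// handled earlier in this method and never reach this point.)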
const int MaxUnrollSize = 16; if (iterations <= MaxUnrollSize) { // if (slice[sliceStaticPos] != c1 || // slice[sliceStaticPos + 1] != c2 || // ...) // goto doneLabel; for (int i = 0; i < iterations; i++) { EmitSingleChar(node, emitLengthCheck: false); } } else { // ReadOnlySpan<char> tmp = slice.Slice(sliceStaticPos, iterations); // for (int i = 0; i < tmp.Length; i++) // { // TimeoutCheck(); // if (tmp[i] != ch) goto Done; // } // sliceStaticPos += iterations; Label conditionLabel = DefineLabel(); Label bodyLabel = DefineLabel(); using RentedLocalBuilder spanLocal = RentReadOnlySpanCharLocal(); Ldloca(slice); Ldc(sliceStaticPos); Ldc(iterations); Call(s_spanSliceIntIntMethod); Stloc(spanLocal); using RentedLocalBuilder iterationLocal = RentInt32Local(); Ldc(0); Stloc(iterationLocal); BrFar(conditionLabel); MarkLabel(bodyLabel); EmitTimeoutCheck(); LocalBuilder tmpTextSpanLocal = slice; // we want EmitSingleChar to refer to this temporary int tmpTextSpanPos = sliceStaticPos; slice = spanLocal; sliceStaticPos = 0; EmitSingleChar(node, emitLengthCheck: false, offset: iterationLocal); slice = tmpTextSpanLocal; sliceStaticPos = tmpTextSpanPos; Ldloc(iterationLocal); Ldc(1); Add(); Stloc(iterationLocal); MarkLabel(conditionLabel); Ldloc(iterationLocal); Ldloca(spanLocal); Call(s_spanGetLengthMethod); BltFar(bodyLabel); sliceStaticPos += iterations; } } // Emits the code to handle a non-backtracking, variable-length loop around a single character comparison. void EmitSingleCharAtomicLoop(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic, $"Unexpected type: {node.Kind}"); // If this is actually a repeater, emit that instead. if (node.M == node.N) { EmitSingleCharRepeater(node); return; } // If this is actually an optional single char, emit that instead. if (node.M == 0 && node.N == 1) { EmitAtomicSingleCharZeroOrOne(node); return; } Debug.Assert(node.N > node.M); int minIterations = node.M; int maxIterations = node.N; using RentedLocalBuilder iterationLocal = RentInt32Local(); Label atomicLoopDoneLabel = DefineLabel(); Span<char> setChars = stackalloc char[5]; // max optimized by IndexOfAny today int numSetChars = 0; if (node.IsNotoneFamily && maxIterations == int.MaxValue && (!IsCaseInsensitive(node))) { // For Notone, we're looking for a specific character, as everything until we find // it is consumed by the loop. If we're unbounded, such as with ".*" and if we're case-sensitive, // we can use the vectorized IndexOf to do the search, rather than open-coding it. The unbounded // restriction is purely for simplicity; it could be removed in the future with additional code to // handle the unbounded case. 
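// (For example, the "[^>]*" in a hypothetical "<[^>]*>" is unbounded and case-sensitive, so instead
// of a char-by-char loop it becomes the vectorized IndexOf('>') search sketched below.)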
// int i = slice.Slice(sliceStaticPos).IndexOf(char); if (sliceStaticPos > 0) { Ldloca(slice); Ldc(sliceStaticPos); Call(s_spanSliceIntMethod); } else { Ldloc(slice); } Ldc(node.Ch); Call(s_spanIndexOfChar); Stloc(iterationLocal); // if (i >= 0) goto atomicLoopDoneLabel; Ldloc(iterationLocal); Ldc(0); BgeFar(atomicLoopDoneLabel); // i = slice.Length - sliceStaticPos; Ldloca(slice); Call(s_spanGetLengthMethod); if (sliceStaticPos > 0) { Ldc(sliceStaticPos); Sub(); } Stloc(iterationLocal); } else if (node.IsSetFamily && maxIterations == int.MaxValue && !IsCaseInsensitive(node) && (numSetChars = RegexCharClass.GetSetChars(node.Str!, setChars)) != 0 && RegexCharClass.IsNegated(node.Str!)) { // If the set is negated and contains only a few characters (if it contained 1 and was negated, it would // have been reduced to a Notone), we can use an IndexOfAny to find any of the target characters. // As with the notoneloopatomic above, the unbounded constraint is purely for simplicity. Debug.Assert(numSetChars > 1); // int i = slice.Slice(sliceStaticPos).IndexOfAny(ch1, ch2, ...); if (sliceStaticPos > 0) { Ldloca(slice); Ldc(sliceStaticPos); Call(s_spanSliceIntMethod); } else { Ldloc(slice); } switch (numSetChars) { case 2: Ldc(setChars[0]); Ldc(setChars[1]); Call(s_spanIndexOfAnyCharChar); break; case 3: Ldc(setChars[0]); Ldc(setChars[1]); Ldc(setChars[2]); Call(s_spanIndexOfAnyCharCharChar); break; default: Ldstr(setChars.Slice(0, numSetChars).ToString()); Call(s_stringAsSpanMethod); Call(s_spanIndexOfSpan); break; } Stloc(iterationLocal); // if (i >= 0) goto atomicLoopDoneLabel; Ldloc(iterationLocal); Ldc(0); BgeFar(atomicLoopDoneLabel); // i = slice.Length - sliceStaticPos; Ldloca(slice); Call(s_spanGetLengthMethod); if (sliceStaticPos > 0) { Ldc(sliceStaticPos); Sub(); } Stloc(iterationLocal); } else if (node.IsSetFamily && maxIterations == int.MaxValue && node.Str == RegexCharClass.AnyClass) { // .* was used with RegexOptions.Singleline, which means it'll consume everything. Just jump to the end. // The unbounded constraint is the same as in the Notone case above, done purely for simplicity. // int i = inputSpan.Length - pos; TransferSliceStaticPosToPos(); Ldloca(inputSpan); Call(s_spanGetLengthMethod); Ldloc(pos); Sub(); Stloc(iterationLocal); } else { // For everything else, do a normal loop. // Transfer sliceStaticPos to pos to help with bounds check elimination on the loop. 
TransferSliceStaticPosToPos(); Label conditionLabel = DefineLabel(); Label bodyLabel = DefineLabel(); // int i = 0; Ldc(0); Stloc(iterationLocal); BrFar(conditionLabel); // Body: // TimeoutCheck(); MarkLabel(bodyLabel); EmitTimeoutCheck(); // if ((uint)i >= (uint)slice.Length) goto atomicLoopDoneLabel; Ldloc(iterationLocal); Ldloca(slice); Call(s_spanGetLengthMethod); BgeUnFar(atomicLoopDoneLabel); // if (slice[i] != ch) goto atomicLoopDoneLabel; Ldloca(slice); Ldloc(iterationLocal); Call(s_spanGetItemMethod); LdindU2(); if (node.IsSetFamily) { EmitMatchCharacterClass(node.Str!, IsCaseInsensitive(node)); BrfalseFar(atomicLoopDoneLabel); } else { if (IsCaseInsensitive(node)) { CallToLower(); } Ldc(node.Ch); if (node.IsOneFamily) { BneFar(atomicLoopDoneLabel); } else // IsNotoneFamily { BeqFar(atomicLoopDoneLabel); } } // i++; Ldloc(iterationLocal); Ldc(1); Add(); Stloc(iterationLocal); // if (i >= maxIterations) goto atomicLoopDoneLabel; MarkLabel(conditionLabel); if (maxIterations != int.MaxValue) { Ldloc(iterationLocal); Ldc(maxIterations); BltFar(bodyLabel); } else { BrFar(bodyLabel); } } // Done: MarkLabel(atomicLoopDoneLabel); // Check to ensure we've found at least min iterations. if (minIterations > 0) { Ldloc(iterationLocal); Ldc(minIterations); BltFar(doneLabel); } // Now that we've completed our optional iterations, advance the text span // and pos by the number of iterations completed. // slice = slice.Slice(i); Ldloca(slice); Ldloc(iterationLocal); Call(s_spanSliceIntMethod); Stloc(slice); // pos += i; Ldloc(pos); Ldloc(iterationLocal); Add(); Stloc(pos); } // Emits the code to handle a non-backtracking optional zero-or-one loop. void EmitAtomicSingleCharZeroOrOne(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M == 0 && node.N == 1); Label skipUpdatesLabel = DefineLabel(); // if ((uint)sliceStaticPos >= (uint)slice.Length) goto skipUpdatesLabel; Ldc(sliceStaticPos); Ldloca(slice); Call(s_spanGetLengthMethod); BgeUnFar(skipUpdatesLabel); // if (slice[sliceStaticPos] != ch) goto skipUpdatesLabel; Ldloca(slice); Ldc(sliceStaticPos); Call(s_spanGetItemMethod); LdindU2(); if (node.IsSetFamily) { EmitMatchCharacterClass(node.Str!, IsCaseInsensitive(node)); BrfalseFar(skipUpdatesLabel); } else { if (IsCaseInsensitive(node)) { CallToLower(); } Ldc(node.Ch); if (node.IsOneFamily) { BneFar(skipUpdatesLabel); } else // IsNotoneFamily { BeqFar(skipUpdatesLabel); } } // slice = slice.Slice(1); Ldloca(slice); Ldc(1); Call(s_spanSliceIntMethod); Stloc(slice); // pos++; Ldloc(pos); Ldc(1); Add(); Stloc(pos); MarkLabel(skipUpdatesLabel); } void EmitNonBacktrackingRepeater(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M < int.MaxValue, $"Unexpected M={node.M}"); Debug.Assert(node.M == node.N, $"Unexpected M={node.M} == N={node.N}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); Debug.Assert(!analysis.MayBacktrack(node.Child(0)), $"Expected non-backtracking node {node.Kind}"); // Ensure every iteration of the loop sees a consistent value. TransferSliceStaticPosToPos(); // Loop M==N times to match the child exactly that numbers of times. Label condition = DefineLabel(); Label body = DefineLabel(); // for (int i = 0; ...) 
using RentedLocalBuilder i = RentInt32Local(); Ldc(0); Stloc(i); BrFar(condition); MarkLabel(body); EmitNode(node.Child(0)); TransferSliceStaticPosToPos(); // make sure static the static position remains at 0 for subsequent constructs // for (...; ...; i++) Ldloc(i); Ldc(1); Add(); Stloc(i); // for (...; i < node.M; ...) MarkLabel(condition); Ldloc(i); Ldc(node.M); BltFar(body); } void EmitLoop(RegexNode node) { Debug.Assert(node.Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop, $"Unexpected type: {node.Kind}"); Debug.Assert(node.M < int.MaxValue, $"Unexpected M={node.M}"); Debug.Assert(node.N >= node.M, $"Unexpected M={node.M}, N={node.N}"); Debug.Assert(node.ChildCount() == 1, $"Expected 1 child, found {node.ChildCount()}"); int minIterations = node.M; int maxIterations = node.N; bool isAtomic = analysis.IsAtomicByAncestor(node); // If this is actually a repeater and the child doesn't have any backtracking in it that might // cause us to need to unwind already taken iterations, just output it as a repeater loop. if (minIterations == maxIterations && !analysis.MayBacktrack(node.Child(0))) { EmitNonBacktrackingRepeater(node); return; } // We might loop any number of times. In order to ensure this loop and subsequent code sees sliceStaticPos // the same regardless, we always need it to contain the same value, and the easiest such value is 0. // So, we transfer sliceStaticPos to pos, and ensure that any path out of here has sliceStaticPos as 0. TransferSliceStaticPosToPos(); Label originalDoneLabel = doneLabel; LocalBuilder startingPos = DeclareInt32(); LocalBuilder iterationCount = DeclareInt32(); Label body = DefineLabel(); Label endLoop = DefineLabel(); // iterationCount = 0; // startingPos = 0; Ldc(0); Stloc(iterationCount); Ldc(0); Stloc(startingPos); // Iteration body MarkLabel(body); EmitTimeoutCheck(); // We need to store the starting pos and crawl position so that it may // be backtracked through later. This needs to be the starting position from // the iteration we're leaving, so it's pushed before updating it to pos. EmitStackResizeIfNeeded(3); if (expressionHasCaptures) { // base.runstack[stackpos++] = base.Crawlpos(); EmitStackPush(() => { Ldthis(); Call(s_crawlposMethod); }); } EmitStackPush(() => Ldloc(startingPos)); EmitStackPush(() => Ldloc(pos)); // Save off some state. We need to store the current pos so we can compare it against // pos after the iteration, in order to determine whether the iteration was empty. Empty // iterations are allowed as part of min matches, but once we've met the min quote, empty matches // are considered match failures. // startingPos = pos; Ldloc(pos); Stloc(startingPos); // Proactively increase the number of iterations. We do this prior to the match rather than once // we know it's successful, because we need to decrement it as part of a failed match when // backtracking; it's thus simpler to just always decrement it as part of a failed match, even // when initially greedily matching the loop, which then requires we increment it before trying. // iterationCount++; Ldloc(iterationCount); Ldc(1); Add(); Stloc(iterationCount); // Last but not least, we need to set the doneLabel that a failed match of the body will jump to. // Such an iteration match failure may or may not fail the whole operation, depending on whether // we've already matched the minimum required iterations, so we need to jump to a location that // will make that determination. 
Label iterationFailedLabel = DefineLabel(); doneLabel = iterationFailedLabel; // Finally, emit the child. Debug.Assert(sliceStaticPos == 0); EmitNode(node.Child(0)); TransferSliceStaticPosToPos(); // ensure sliceStaticPos remains 0 bool childBacktracks = doneLabel != iterationFailedLabel; // Loop condition. Continue iterating greedily if we've not yet reached the maximum. We also need to stop // iterating if the iteration matched empty and we already hit the minimum number of iterations. Otherwise, // we've matched as many iterations as we can with this configuration. Jump to what comes after the loop. switch ((minIterations > 0, maxIterations == int.MaxValue)) { case (true, true): // if (pos != startingPos || iterationCount < minIterations) goto body; // goto endLoop; Ldloc(pos); Ldloc(startingPos); BneFar(body); Ldloc(iterationCount); Ldc(minIterations); BltFar(body); BrFar(endLoop); break; case (true, false): // if ((pos != startingPos || iterationCount < minIterations) && iterationCount < maxIterations) goto body; // goto endLoop; Ldloc(iterationCount); Ldc(maxIterations); BgeFar(endLoop); Ldloc(pos); Ldloc(startingPos); BneFar(body); Ldloc(iterationCount); Ldc(minIterations); BltFar(body); BrFar(endLoop); break; case (false, true): // if (pos != startingPos) goto body; // goto endLoop; Ldloc(pos); Ldloc(startingPos); BneFar(body); BrFar(endLoop); break; case (false, false): // if (pos == startingPos || iterationCount >= maxIterations) goto endLoop; // goto body; Ldloc(pos); Ldloc(startingPos); BeqFar(endLoop); Ldloc(iterationCount); Ldc(maxIterations); BgeFar(endLoop); BrFar(body); break; } // Now handle what happens when an iteration fails, which could be an initial failure or it // could be while backtracking. We need to reset state to what it was before just that iteration // started. That includes resetting pos and clearing out any captures from that iteration. MarkLabel(iterationFailedLabel); // iterationCount--; Ldloc(iterationCount); Ldc(1); Sub(); Stloc(iterationCount); // if (iterationCount < 0) goto originalDoneLabel; Ldloc(iterationCount); Ldc(0); BltFar(originalDoneLabel); // pos = base.runstack[--stackpos]; // startingPos = base.runstack[--stackpos]; EmitStackPop(); Stloc(pos); EmitStackPop(); Stloc(startingPos); if (expressionHasCaptures) { // int poppedCrawlPos = base.runstack[--stackpos]; // while (base.Crawlpos() > poppedCrawlPos) base.Uncapture(); using RentedLocalBuilder poppedCrawlPos = RentInt32Local(); EmitStackPop(); Stloc(poppedCrawlPos); EmitUncaptureUntil(poppedCrawlPos); } SliceInputSpan(); if (minIterations > 0) { // if (iterationCount == 0) goto originalDoneLabel; Ldloc(iterationCount); Ldc(0); BeqFar(originalDoneLabel); // if (iterationCount < minIterations) goto doneLabel/originalDoneLabel; Ldloc(iterationCount); Ldc(minIterations); BltFar(childBacktracks ? 
doneLabel : originalDoneLabel); } if (isAtomic) { doneLabel = originalDoneLabel; MarkLabel(endLoop); } else { if (childBacktracks) { // goto endLoop; BrFar(endLoop); // Backtrack: Label backtrack = DefineLabel(); MarkLabel(backtrack); // if (iterationCount == 0) goto originalDoneLabel; Ldloc(iterationCount); Ldc(0); BeqFar(originalDoneLabel); // goto doneLabel; BrFar(doneLabel); doneLabel = backtrack; } MarkLabel(endLoop); if (node.IsInLoop()) { // Store the loop's state EmitStackResizeIfNeeded(3); EmitStackPush(() => Ldloc(startingPos)); EmitStackPush(() => Ldloc(iterationCount)); // Skip past the backtracking section // goto backtrackingEnd; Label backtrackingEnd = DefineLabel(); BrFar(backtrackingEnd); // Emit a backtracking section that restores the loop's state and then jumps to the previous done label Label backtrack = DefineLabel(); MarkLabel(backtrack); // iterationCount = base.runstack[--runstack]; // startingPos = base.runstack[--runstack]; EmitStackPop(); Stloc(iterationCount); EmitStackPop(); Stloc(startingPos); // goto doneLabel; BrFar(doneLabel); doneLabel = backtrack; MarkLabel(backtrackingEnd); } } } void EmitStackResizeIfNeeded(int count) { Debug.Assert(count >= 1); // if (stackpos >= base.runstack!.Length - (count - 1)) // { // Array.Resize(ref base.runstack, base.runstack.Length * 2); // } Label skipResize = DefineLabel(); Ldloc(stackpos); Ldthisfld(s_runstackField); Ldlen(); if (count > 1) { Ldc(count - 1); Sub(); } Blt(skipResize); Ldthis(); _ilg!.Emit(OpCodes.Ldflda, s_runstackField); Ldthisfld(s_runstackField); Ldlen(); Ldc(2); Mul(); Call(s_arrayResize); MarkLabel(skipResize); } void EmitStackPush(Action load) { // base.runstack[stackpos] = load(); Ldthisfld(s_runstackField); Ldloc(stackpos); load(); StelemI4(); // stackpos++; Ldloc(stackpos); Ldc(1); Add(); Stloc(stackpos); } void EmitStackPop() { // ... = base.runstack[--stackpos]; Ldthisfld(s_runstackField); Ldloc(stackpos); Ldc(1); Sub(); Stloc(stackpos); Ldloc(stackpos); LdelemI4(); } } protected void EmitScan(DynamicMethod tryFindNextStartingPositionMethod, DynamicMethod tryMatchAtCurrentPositionMethod) { Label returnLabel = DefineLabel(); // while (TryFindNextPossibleStartingPosition(text)) Label whileLoopBody = DefineLabel(); MarkLabel(whileLoopBody); Ldthis(); Ldarg_1(); Call(tryFindNextStartingPositionMethod); BrfalseFar(returnLabel); if (_hasTimeout) { // CheckTimeout(); Ldthis(); Call(s_checkTimeoutMethod); } // if (TryMatchAtCurrentPosition(text) || runtextpos == text.length) // return; Ldthis(); Ldarg_1(); Call(tryMatchAtCurrentPositionMethod); BrtrueFar(returnLabel); Ldthisfld(s_runtextposField); Ldarga_s(1); Call(s_spanGetLengthMethod); Ceq(); BrtrueFar(returnLabel); // runtextpos += 1 Ldthis(); Ldthisfld(s_runtextposField); Ldc(1); Add(); Stfld(s_runtextposField); // End loop body. 
BrFar(whileLoopBody); // return; MarkLabel(returnLabel); Ret(); } private void InitializeCultureForTryMatchAtCurrentPositionIfNecessary(AnalysisResults analysis) { _textInfo = null; if (analysis.HasIgnoreCase && (_options & RegexOptions.CultureInvariant) == 0) { // cache CultureInfo in local variable which saves excessive thread local storage accesses _textInfo = DeclareTextInfo(); InitLocalCultureInfo(); } } /// <summary>Emits a a check for whether the character is in the specified character class.</summary> /// <remarks>The character to be checked has already been loaded onto the stack.</remarks> private void EmitMatchCharacterClass(string charClass, bool caseInsensitive) { // We need to perform the equivalent of calling RegexRunner.CharInClass(ch, charClass), // but that call is relatively expensive. Before we fall back to it, we try to optimize // some common cases for which we can do much better, such as known character classes // for which we can call a dedicated method, or a fast-path for ASCII using a lookup table. // First, see if the char class is a built-in one for which there's a better function // we can just call directly. Everything in this section must work correctly for both // case-sensitive and case-insensitive modes, regardless of culture. switch (charClass) { case RegexCharClass.AnyClass: // true Pop(); Ldc(1); return; case RegexCharClass.DigitClass: // char.IsDigit(ch) Call(s_charIsDigitMethod); return; case RegexCharClass.NotDigitClass: // !char.IsDigit(ch) Call(s_charIsDigitMethod); Ldc(0); Ceq(); return; case RegexCharClass.SpaceClass: // char.IsWhiteSpace(ch) Call(s_charIsWhiteSpaceMethod); return; case RegexCharClass.NotSpaceClass: // !char.IsWhiteSpace(ch) Call(s_charIsWhiteSpaceMethod); Ldc(0); Ceq(); return; case RegexCharClass.WordClass: // RegexRunner.IsWordChar(ch) Call(s_isWordCharMethod); return; case RegexCharClass.NotWordClass: // !RegexRunner.IsWordChar(ch) Call(s_isWordCharMethod); Ldc(0); Ceq(); return; } // If we're meant to be doing a case-insensitive lookup, and if we're not using the invariant culture, // lowercase the input. If we're using the invariant culture, we may still end up calling ToLower later // on, but we may also be able to avoid it, in particular in the case of our lookup table, where we can // generate the lookup table already factoring in the invariant case sensitivity. There are multiple // special-code paths between here and the lookup table, but we only take those if invariant is false; // if it were true, they'd need to use CallToLower(). bool invariant = false; if (caseInsensitive) { invariant = UseToLowerInvariant; if (!invariant) { CallToLower(); } } // Next, handle simple sets of one range, e.g. [A-Z], [0-9], etc. This includes some built-in classes, like ECMADigitClass. if (!invariant && RegexCharClass.TryGetSingleRange(charClass, out char lowInclusive, out char highInclusive)) { if (lowInclusive == highInclusive) { // ch == charClass[3] Ldc(lowInclusive); Ceq(); } else { // (uint)ch - lowInclusive < highInclusive - lowInclusive + 1 Ldc(lowInclusive); Sub(); Ldc(highInclusive - lowInclusive + 1); CltUn(); } // Negate the answer if the negation flag was set if (RegexCharClass.IsNegated(charClass)) { Ldc(0); Ceq(); } return; } // Next if the character class contains nothing but a single Unicode category, we can calle char.GetUnicodeCategory and // compare against it. 
It has a fast-lookup path for ASCII, so is as good or better than any lookup we'd generate (plus // we get smaller code), and it's what we'd do for the fallback (which we get to avoid generating) as part of CharInClass. if (!invariant && RegexCharClass.TryGetSingleUnicodeCategory(charClass, out UnicodeCategory category, out bool negated)) { // char.GetUnicodeCategory(ch) == category Call(s_charGetUnicodeInfo); Ldc((int)category); Ceq(); if (negated) { Ldc(0); Ceq(); } return; } // All checks after this point require reading the input character multiple times, // so we store it into a temporary local. using RentedLocalBuilder tempLocal = RentInt32Local(); Stloc(tempLocal); // Next, if there's only 2 or 3 chars in the set (fairly common due to the sets we create for prefixes), // it's cheaper and smaller to compare against each than it is to use a lookup table. if (!invariant && !RegexCharClass.IsNegated(charClass)) { Span<char> setChars = stackalloc char[3]; int numChars = RegexCharClass.GetSetChars(charClass, setChars); if (numChars is 2 or 3) { if (RegexCharClass.DifferByOneBit(setChars[0], setChars[1], out int mask)) // special-case common case of an upper and lowercase ASCII letter combination { // ((ch | mask) == setChars[1]) Ldloc(tempLocal); Ldc(mask); Or(); Ldc(setChars[1] | mask); Ceq(); } else { // (ch == setChars[0]) | (ch == setChars[1]) Ldloc(tempLocal); Ldc(setChars[0]); Ceq(); Ldloc(tempLocal); Ldc(setChars[1]); Ceq(); Or(); } // | (ch == setChars[2]) if (numChars == 3) { Ldloc(tempLocal); Ldc(setChars[2]); Ceq(); Or(); } return; } } using RentedLocalBuilder resultLocal = RentInt32Local(); // Analyze the character set more to determine what code to generate. RegexCharClass.CharClassAnalysisResults analysis = RegexCharClass.Analyze(charClass); // Helper method that emits a call to RegexRunner.CharInClass(ch{.ToLowerInvariant()}, charClass) void EmitCharInClass() { Ldloc(tempLocal); if (invariant) { CallToLower(); } Ldstr(charClass); Call(s_charInClassMethod); Stloc(resultLocal); } Label doneLabel = DefineLabel(); Label comparisonLabel = DefineLabel(); if (!invariant) // if we're being asked to do a case insensitive, invariant comparison, use the lookup table { if (analysis.ContainsNoAscii) { // We determined that the character class contains only non-ASCII, // for example if the class were [\p{IsGreek}\p{IsGreekExtended}], which is // the same as [\u0370-\u03FF\u1F00-1FFF]. (In the future, we could possibly // extend the analysis to produce a known lower-bound and compare against // that rather than always using 128 as the pivot point.) // ch >= 128 && RegexRunner.CharInClass(ch, "...") Ldloc(tempLocal); Ldc(128); Blt(comparisonLabel); EmitCharInClass(); Br(doneLabel); MarkLabel(comparisonLabel); Ldc(0); Stloc(resultLocal); MarkLabel(doneLabel); Ldloc(resultLocal); return; } if (analysis.AllAsciiContained) { // We determined that every ASCII character is in the class, for example // if the class were the negated example from case 1 above: // [^\p{IsGreek}\p{IsGreekExtended}]. // ch < 128 || RegexRunner.CharInClass(ch, "...") Ldloc(tempLocal); Ldc(128); Blt(comparisonLabel); EmitCharInClass(); Br(doneLabel); MarkLabel(comparisonLabel); Ldc(1); Stloc(resultLocal); MarkLabel(doneLabel); Ldloc(resultLocal); return; } } // Now, our big hammer is to generate a lookup table that lets us quickly index by character into a yes/no // answer as to whether the character is in the target character class. 
However, we don't want to store // a lookup table for every possible character for every character class in the regular expression; at one // bit for each of 65K characters, that would be an 8K bitmap per character class. Instead, we handle the // common case of ASCII input via such a lookup table, which at one bit for each of 128 characters is only // 16 bytes per character class. We of course still need to be able to handle inputs that aren't ASCII, so // we check the input against 128, and have a fallback if the input is >= to it. Determining the right // fallback could itself be expensive. For example, if it's possible that a value >= 128 could match the // character class, we output a call to RegexRunner.CharInClass, but we don't want to have to enumerate the // entire character class evaluating every character against it, just to determine whether it's a match. // Instead, we employ some quick heuristics that will always ensure we provide a correct answer even if // we could have sometimes generated better code to give that answer. // Generate the lookup table to store 128 answers as bits. We use a const string instead of a byte[] / static // data property because it lets IL emit handle all the details for us. string bitVectorString = string.Create(8, (charClass, invariant), static (dest, state) => // String length is 8 chars == 16 bytes == 128 bits. { for (int i = 0; i < 128; i++) { char c = (char)i; bool isSet = state.invariant ? RegexCharClass.CharInClass(char.ToLowerInvariant(c), state.charClass) : RegexCharClass.CharInClass(c, state.charClass); if (isSet) { dest[i >> 4] |= (char)(1 << (i & 0xF)); } } }); // We determined that the character class may contain ASCII, so we // output the lookup against the lookup table. // ch < 128 ? (bitVectorString[ch >> 4] & (1 << (ch & 0xF))) != 0 : Ldloc(tempLocal); Ldc(128); Bge(comparisonLabel); Ldstr(bitVectorString); Ldloc(tempLocal); Ldc(4); Shr(); Call(s_stringGetCharsMethod); Ldc(1); Ldloc(tempLocal); Ldc(15); And(); Ldc(31); And(); Shl(); And(); Ldc(0); CgtUn(); Stloc(resultLocal); Br(doneLabel); MarkLabel(comparisonLabel); if (analysis.ContainsOnlyAscii) { // We know that all inputs that could match are ASCII, for example if the // character class were [A-Za-z0-9], so since the ch is now known to be >= 128, we // can just fail the comparison. Ldc(0); Stloc(resultLocal); } else if (analysis.AllNonAsciiContained) { // We know that all non-ASCII inputs match, for example if the character // class were [^\r\n], so since we just determined the ch to be >= 128, we can just // give back success. Ldc(1); Stloc(resultLocal); } else { // We know that the whole class wasn't ASCII, and we don't know anything about the non-ASCII // characters other than that some might be included, for example if the character class // were [\w\d], so since ch >= 128, we need to fall back to calling CharInClass. EmitCharInClass(); } MarkLabel(doneLabel); Ldloc(resultLocal); } /// <summary>Emits a timeout check.</summary> private void EmitTimeoutCheck() { if (!_hasTimeout) { return; } Debug.Assert(_loopTimeoutCounter != null); // Increment counter for each loop iteration. Ldloc(_loopTimeoutCounter); Ldc(1); Add(); Stloc(_loopTimeoutCounter); // Emit code to check the timeout every 2048th iteration. Label label = DefineLabel(); Ldloc(_loopTimeoutCounter); Ldc(LoopTimeoutCheckCount); RemUn(); Brtrue(label); Ldthis(); Call(s_checkTimeoutMethod); MarkLabel(label); } } }
1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Later, when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so lets them avoid unnecessary backtracking-related code generation when the child is known not to backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler's / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that would then enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. The source generator then comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems: the node itself will have unnecessary backtracking code generated, but the parent will rightly assume there wasn't any and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer-loop tests, which source-generate our full regex corpus, caught a case where this was happening. There are a couple of fixes, either of which on its own is sufficient to address this particular case, and each of which also brings other benefits: 1. When rendering a single-char loop, the source generator consults the computed atomicity table to determine whether the rest of the source generation views the loop as atomic. If it does, it emits an atomic rendering instead. 2. When we do our ending-backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This change also removes some duplicated code for reducing lookarounds and renames some stale methods.
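A rough sketch of fix (1), assuming the overall shape of the source generator's loop emission (AnalysisResults and IsAtomicByAncestor appear in the surrounding code; the two Emit* helpers named here are hypothetical stand-ins, not the actual method names):

void EmitSingleCharLoop(RegexNode node)
{
    // If the shared analysis already considers this loop atomic by ancestry, render
    // the atomic (non-backtracking) form so the emitted code matches what the rest
    // of the generator assumes about it.
    if (analysis.IsAtomicByAncestor(node))
    {
        EmitSingleCharAtomicLoop(node);       // hypothetical atomic renderer
        return;
    }

    EmitSingleCharBacktrackingLoop(node);     // hypothetical backtracking renderer
}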
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Later, when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so lets them avoid unnecessary backtracking-related code generation when the child is known not to backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler's / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that would then enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. The source generator then comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems: the node itself will have unnecessary backtracking code generated, but the parent will rightly assume there wasn't any and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer-loop tests, which source-generate our full regex corpus, caught a case where this was happening. There are a couple of fixes, either of which on its own is sufficient to address this particular case, and each of which also brings other benefits: 1. When rendering a single-char loop, the source generator consults the computed atomicity table to determine whether the rest of the source generation views the loop as atomic. If it does, it emits an atomic rendering instead. 2. When we do our ending-backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This change also removes some duplicated code for reducing lookarounds and renames some stale methods.
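And a rough sketch of fix (2), written in the style of the EliminateEndingBacktracking switch that appears in the RegexNode.cs content below; this fragment illustrates the idea (recur into a lookaround's single child) rather than the exact change made by the PR:

// Lookarounds are implicitly atomic: once one completes, the engine never backtracks
// back into it, so it's safe to eliminate ending backtracking inside its child too.
case RegexNodeKind.PositiveLookaround:
case RegexNodeKind.NegativeLookaround:
    node = node.Child(0);
    continue;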
./src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/RegexNode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.Threading; namespace System.Text.RegularExpressions { /// <summary>Represents a regex subexpression.</summary> internal sealed class RegexNode { /// <summary>empty bit from the node's options to store data on whether a node contains captures</summary> internal const RegexOptions HasCapturesFlag = (RegexOptions)(1 << 31); /// <summary>Arbitrary number of repetitions of the same character when we'd prefer to represent that as a repeater of that character rather than a string.</summary> internal const int MultiVsRepeaterLimit = 64; /// <summary>The node's children.</summary> /// <remarks>null if no children, a <see cref="RegexNode"/> if one child, or a <see cref="List{RegexNode}"/> if multiple children.</remarks> private object? Children; /// <summary>The kind of expression represented by this node.</summary> public RegexNodeKind Kind { get; private set; } /// <summary>A string associated with the node.</summary> /// <remarks>For a <see cref="RegexNodeKind.Multi"/>, this is the string from the expression. For an <see cref="IsSetFamily"/> node, this is the character class string from <see cref="RegexCharClass"/>.</remarks> public string? Str { get; private set; } /// <summary>The character associated with the node.</summary> /// <remarks>For a <see cref="IsOneFamily"/> or <see cref="IsNotoneFamily"/> node, the character from the expression.</remarks> public char Ch { get; private set; } /// <summary>The minimum number of iterations for a loop, or the capture group number for a capture or backreference.</summary> /// <remarks>No minimum is represented by 0. No capture group is represented by -1.</remarks> public int M { get; private set; } /// <summary>The maximum number of iterations for a loop, or the uncapture group number for a balancing group.</summary> /// <remarks>No upper bound is represented by <see cref="int.MaxValue"/>. No capture group is represented by -1.</remarks> public int N { get; private set; } /// <summary>The options associated with the node.</summary> public RegexOptions Options; /// <summary>The node's parent node in the tree.</summary> /// <remarks> /// During parsing, top-level nodes are also stacked onto a parse stack (a stack of trees) using <see cref="Parent"/>. /// After parsing, <see cref="Parent"/> is the node in the tree that has this node as or in <see cref="Children"/>. /// </remarks> public RegexNode? Parent; public RegexNode(RegexNodeKind kind, RegexOptions options) { Kind = kind; Options = options; } public RegexNode(RegexNodeKind kind, RegexOptions options, char ch) { Kind = kind; Options = options; Ch = ch; } public RegexNode(RegexNodeKind kind, RegexOptions options, string str) { Kind = kind; Options = options; Str = str; } public RegexNode(RegexNodeKind kind, RegexOptions options, int m) { Kind = kind; Options = options; M = m; } public RegexNode(RegexNodeKind kind, RegexOptions options, int m, int n) { Kind = kind; Options = options; M = m; N = n; } /// <summary>Creates a RegexNode representing a single character.</summary> /// <param name="ch">The character.</param> /// <param name="options">The node's options.</param> /// <param name="culture">The culture to use to perform any required transformations.</param> /// <returns>The created RegexNode. 
This might be a RegexNode.One or a RegexNode.Set.</returns> public static RegexNode CreateOneWithCaseConversion(char ch, RegexOptions options, CultureInfo? culture) { // If the options specify case-insensitivity, we try to create a node that fully encapsulates that. if ((options & RegexOptions.IgnoreCase) != 0) { Debug.Assert(culture is not null); // If the character is part of a Unicode category that doesn't participate in case conversion, // we can simply strip out the IgnoreCase option and make the node case-sensitive. if (!RegexCharClass.ParticipatesInCaseConversion(ch)) { return new RegexNode(RegexNodeKind.One, options & ~RegexOptions.IgnoreCase, ch); } // Create a set for the character, trying to include all case-insensitive equivalent characters. // If it's successful in doing so, resultIsCaseInsensitive will be false and we can strip // out RegexOptions.IgnoreCase as part of creating the set. string stringSet = RegexCharClass.OneToStringClass(ch, culture, out bool resultIsCaseInsensitive); if (!resultIsCaseInsensitive) { return new RegexNode(RegexNodeKind.Set, options & ~RegexOptions.IgnoreCase, stringSet); } // Otherwise, until we can get rid of ToLower usage at match time entirely (https://github.com/dotnet/runtime/issues/61048), // lowercase the character and proceed to create an IgnoreCase One node. ch = culture.TextInfo.ToLower(ch); } // Create a One node for the character. return new RegexNode(RegexNodeKind.One, options, ch); } /// <summary>Reverses all children of a concatenation when in RightToLeft mode.</summary> public RegexNode ReverseConcatenationIfRightToLeft() { if ((Options & RegexOptions.RightToLeft) != 0 && Kind == RegexNodeKind.Concatenate && ChildCount() > 1) { ((List<RegexNode>)Children!).Reverse(); } return this; } /// <summary> /// Pass type as OneLazy or OneLoop /// </summary> private void MakeRep(RegexNodeKind kind, int min, int max) { Kind += kind - RegexNodeKind.One; M = min; N = max; } private void MakeLoopAtomic() { switch (Kind) { case RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop: // For loops, we simply change the Type to the atomic variant. // Atomic greedy loops should consume as many values as they can. Kind += RegexNodeKind.Oneloopatomic - RegexNodeKind.Oneloop; break; case RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy: // For lazy, we not only change the Type, we also lower the max number of iterations // to the minimum number of iterations, creating a repeater, as they should end up // matching as little as possible. Kind += RegexNodeKind.Oneloopatomic - RegexNodeKind.Onelazy; N = M; if (N == 0) { // If moving the max to be the same as the min dropped it to 0, there's no // work to be done for this node, and we can make it Empty. Kind = RegexNodeKind.Empty; Str = null; Ch = '\0'; } else if (Kind == RegexNodeKind.Oneloopatomic && N is >= 2 and <= MultiVsRepeaterLimit) { // If this is now a One repeater with a small enough length, // make it a Multi instead, as they're better optimized down the line. 
Kind = RegexNodeKind.Multi; Str = new string(Ch, N); Ch = '\0'; M = N = 0; } break; default: Debug.Fail($"Unexpected type: {Kind}"); break; } } #if DEBUG /// <summary>Validate invariants the rest of the implementation relies on for processing fully-built trees.</summary> [Conditional("DEBUG")] private void ValidateFinalTreeInvariants() { Debug.Assert(Kind == RegexNodeKind.Capture, "Every generated tree should begin with a capture node"); var toExamine = new Stack<RegexNode>(); toExamine.Push(this); while (toExamine.Count > 0) { RegexNode node = toExamine.Pop(); // Add all children to be examined int childCount = node.ChildCount(); for (int i = 0; i < childCount; i++) { RegexNode child = node.Child(i); Debug.Assert(child.Parent == node, $"{child.Describe()} missing reference to parent {node.Describe()}"); toExamine.Push(child); } // Validate that we never see certain node types. Debug.Assert(Kind != RegexNodeKind.Group, "All Group nodes should have been removed."); // Validate node types and expected child counts. switch (node.Kind) { case RegexNodeKind.Group: Debug.Fail("All Group nodes should have been removed."); break; case RegexNodeKind.Beginning: case RegexNodeKind.Bol: case RegexNodeKind.Boundary: case RegexNodeKind.ECMABoundary: case RegexNodeKind.Empty: case RegexNodeKind.End: case RegexNodeKind.EndZ: case RegexNodeKind.Eol: case RegexNodeKind.Multi: case RegexNodeKind.NonBoundary: case RegexNodeKind.NonECMABoundary: case RegexNodeKind.Nothing: case RegexNodeKind.Notone: case RegexNodeKind.Notonelazy: case RegexNodeKind.Notoneloop: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.One: case RegexNodeKind.Onelazy: case RegexNodeKind.Oneloop: case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Backreference: case RegexNodeKind.Set: case RegexNodeKind.Setlazy: case RegexNodeKind.Setloop: case RegexNodeKind.Setloopatomic: case RegexNodeKind.Start: case RegexNodeKind.UpdateBumpalong: Debug.Assert(childCount == 0, $"Expected zero children for {node.Kind}, got {childCount}."); break; case RegexNodeKind.Atomic: case RegexNodeKind.Capture: case RegexNodeKind.Lazyloop: case RegexNodeKind.Loop: case RegexNodeKind.NegativeLookaround: case RegexNodeKind.PositiveLookaround: Debug.Assert(childCount == 1, $"Expected one and only one child for {node.Kind}, got {childCount}."); break; case RegexNodeKind.BackreferenceConditional: Debug.Assert(childCount == 2, $"Expected two children for {node.Kind}, got {childCount}"); break; case RegexNodeKind.ExpressionConditional: Debug.Assert(childCount == 3, $"Expected three children for {node.Kind}, got {childCount}"); break; case RegexNodeKind.Concatenate: case RegexNodeKind.Alternate: Debug.Assert(childCount >= 2, $"Expected at least two children for {node.Kind}, got {childCount}."); break; default: Debug.Fail($"Unexpected node type: {node.Kind}"); break; } // Validate node configuration. 
switch (node.Kind) { case RegexNodeKind.Multi: Debug.Assert(node.Str is not null, "Expect non-null multi string"); Debug.Assert(node.Str.Length >= 2, $"Expected {node.Str} to be at least two characters"); break; case RegexNodeKind.Set: case RegexNodeKind.Setloop: case RegexNodeKind.Setloopatomic: case RegexNodeKind.Setlazy: Debug.Assert(!string.IsNullOrEmpty(node.Str), $"Expected non-null, non-empty string for {node.Kind}."); break; default: Debug.Assert(node.Str is null, $"Expected null string for {node.Kind}, got \"{node.Str}\"."); break; } } } #endif /// <summary>Performs additional optimizations on an entire tree prior to being used.</summary> /// <remarks> /// Some optimizations are performed by the parser while parsing, and others are performed /// as nodes are being added to the tree. The optimizations here expect the tree to be fully /// formed, as they inspect relationships between nodes that may not have been in place as /// individual nodes were being processed/added to the tree. /// </remarks> internal RegexNode FinalOptimize() { RegexNode rootNode = this; Debug.Assert(rootNode.Kind == RegexNodeKind.Capture); Debug.Assert(rootNode.Parent is null); Debug.Assert(rootNode.ChildCount() == 1); // Only apply optimization when LTR to avoid needing additional code for the much rarer RTL case. // Also only apply these optimizations when not using NonBacktracking, as these optimizations are // all about avoiding things that are impactful for the backtracking engines but nops for non-backtracking. if ((Options & (RegexOptions.RightToLeft | RegexOptions.NonBacktracking)) == 0) { // Optimization: eliminate backtracking for loops. // For any single-character loop (Oneloop, Notoneloop, Setloop), see if we can automatically convert // that into its atomic counterpart (Oneloopatomic, Notoneloopatomic, Setloopatomic) based on what // comes after it in the expression tree. rootNode.FindAndMakeLoopsAtomic(); // Optimization: backtracking removal at expression end. // If we find backtracking construct at the end of the regex, we can instead make it non-backtracking, // since nothing would ever backtrack into it anyway. Doing this then makes the construct available // to implementations that don't support backtracking. rootNode.EliminateEndingBacktracking(); // Optimization: unnecessary re-processing of starting loops. // If an expression is guaranteed to begin with a single-character unbounded loop that isn't part of an alternation (in which case it // wouldn't be guaranteed to be at the beginning) or a capture (in which case a back reference could be influenced by its length), then we // can update the tree with a temporary node to indicate that the implementation should use that node's ending position in the input text // as the next starting position at which to start the next match. This avoids redoing matches we've already performed, e.g. matching // "\[email protected]" against "is this a valid [email protected]", the \w+ will initially match the "is" and then will fail to match the "@". // Rather than bumping the scan loop by 1 and trying again to match at the "s", we can instead start at the " ". 
For functional correctness // we can only consider unbounded loops, as to be able to start at the end of the loop we need the loop to have consumed all possible matches; // otherwise, you could end up with a pattern like "a{1,3}b" matching against "aaaabc", which should match, but if we pre-emptively stop consuming // after the first three a's and re-start from that position, we'll end up failing the match even though it should have succeeded. We can also // apply this optimization to non-atomic loops: even though backtracking could be necessary, such backtracking would be handled within the processing // of a single starting position. Lazy loops similarly benefit, as a failed match will result in exploring the exact same search space as with // a greedy loop, just in the opposite order (and a successful match will overwrite the bumpalong position); we need to avoid atomic lazy loops, // however, as they will only end up as a repeater for the minimum length and thus will effectively end up with a non-infinite upper bound, which // we've already outlined is problematic. { RegexNode node = rootNode.Child(0); // skip implicit root capture node bool atomicByAncestry = true; // the root is implicitly atomic because nothing comes after it (same for the implicit root capture) while (true) { switch (node.Kind) { case RegexNodeKind.Atomic: node = node.Child(0); continue; case RegexNodeKind.Concatenate: atomicByAncestry = false; node = node.Child(0); continue; case RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic when node.N == int.MaxValue: case RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy when node.N == int.MaxValue && !atomicByAncestry: if (node.Parent is { Kind: RegexNodeKind.Concatenate } parent) { parent.InsertChild(1, new RegexNode(RegexNodeKind.UpdateBumpalong, node.Options)); } break; } break; } } } // Done optimizing. Return the final tree. #if DEBUG rootNode.ValidateFinalTreeInvariants(); #endif return rootNode; } /// <summary>Converts nodes at the end of the node tree to be atomic.</summary> /// <remarks> /// The correctness of this optimization depends on nothing being able to backtrack into /// the provided node. That means it must be at the root of the overall expression, or /// it must be an Atomic node that nothing will backtrack into by the very nature of Atomic. /// </remarks> private void EliminateEndingBacktracking() { if (!StackHelper.TryEnsureSufficientExecutionStack() || (Options & (RegexOptions.RightToLeft | RegexOptions.NonBacktracking)) != 0) { // If we can't recur further, just stop optimizing. // We haven't done the work to validate this is correct for RTL. // And NonBacktracking doesn't support atomic groups and doesn't have backtracking to be eliminated. return; } // Walk the tree starting from the current node. RegexNode node = this; while (true) { switch (node.Kind) { // {One/Notone/Set}loops can be upgraded to {One/Notone/Set}loopatomic nodes, e.g. [abc]* => (?>[abc]*). // And {One/Notone/Set}lazys can similarly be upgraded to be atomic, which really makes them into repeaters // or even empty nodes. case RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop: case RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy: node.MakeLoopAtomic(); break; // Just because a particular node is atomic doesn't mean all its descendants are. // Process them as well. 
case RegexNodeKind.Atomic: node = node.Child(0); continue; // For Capture and Concatenate, we just recur into their last child (only child in the case // of Capture). However, if the child is an alternation or loop, we can also make the // node itself atomic by wrapping it in an Atomic node. Since we later check to see whether a // node is atomic based on its parent or grandparent, we don't bother wrapping such a node in // an Atomic one if its grandparent is already Atomic. // e.g. [xyz](?:abc|def) => [xyz](?>abc|def) case RegexNodeKind.Capture: case RegexNodeKind.Concatenate: RegexNode existingChild = node.Child(node.ChildCount() - 1); if ((existingChild.Kind is RegexNodeKind.Alternate or RegexNodeKind.BackreferenceConditional or RegexNodeKind.ExpressionConditional or RegexNodeKind.Loop or RegexNodeKind.Lazyloop) && (node.Parent is null || node.Parent.Kind != RegexNodeKind.Atomic)) // validate grandparent isn't atomic { var atomic = new RegexNode(RegexNodeKind.Atomic, existingChild.Options); atomic.AddChild(existingChild); node.ReplaceChild(node.ChildCount() - 1, atomic); } node = existingChild; continue; // For alternate, we can recur into each branch separately. We use this iteration for the first branch. // Conditionals are just like alternations in this regard. // e.g. abc*|def* => ab(?>c*)|de(?>f*) case RegexNodeKind.Alternate: case RegexNodeKind.BackreferenceConditional: case RegexNodeKind.ExpressionConditional: { int branches = node.ChildCount(); for (int i = 1; i < branches; i++) { node.Child(i).EliminateEndingBacktracking(); } if (node.Kind != RegexNodeKind.ExpressionConditional) // ReduceTestgroup will have already applied ending backtracking removal { node = node.Child(0); continue; } } break; // For {Lazy}Loop, we search to see if there's a viable last expression, and iff there // is we recur into processing it. Also, as with the single-char lazy loops, LazyLoop // can have its max iteration count dropped to its min iteration count, as there's no // reason for it to match more than the minimal at the end; that in turn makes it a // repeater, which results in better code generation. // e.g. (?:abc*)* => (?:ab(?>c*))* // e.g. (abc*?)+? => (ab){1} case RegexNodeKind.Lazyloop: node.N = node.M; goto case RegexNodeKind.Loop; case RegexNodeKind.Loop: { if (node.N == 1) { // If the loop has a max iteration count of 1 (e.g. it's an optional node), // there's no possibility for conflict between multiple iterations, so // we can process it. node = node.Child(0); continue; } RegexNode? loopDescendent = node.FindLastExpressionInLoopForAutoAtomic(); if (loopDescendent != null) { node = loopDescendent; continue; // loop around to process node } } break; } break; } } /// <summary> /// Removes redundant nodes from the subtree, and returns an optimized subtree. /// </summary> internal RegexNode Reduce() { // TODO: https://github.com/dotnet/runtime/issues/61048 // As part of overhauling IgnoreCase handling, the parser shouldn't produce any nodes other than Backreference // that ever have IgnoreCase set on them. For now, though, remove IgnoreCase from any nodes for which it // has no behavioral effect. 
switch (Kind) { default: // No effect Options &= ~RegexOptions.IgnoreCase; break; case RegexNodeKind.One or RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notone or RegexNodeKind.Notonelazy or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Set or RegexNodeKind.Setlazy or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic: case RegexNodeKind.Multi: case RegexNodeKind.Backreference: // Still meaningful break; } return Kind switch { RegexNodeKind.Alternate => ReduceAlternation(), RegexNodeKind.Atomic => ReduceAtomic(), RegexNodeKind.Concatenate => ReduceConcatenation(), RegexNodeKind.Group => ReduceGroup(), RegexNodeKind.Loop or RegexNodeKind.Lazyloop => ReduceLoops(), RegexNodeKind.NegativeLookaround => ReducePrevent(), RegexNodeKind.PositiveLookaround => ReduceRequire(), RegexNodeKind.Set or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy => ReduceSet(), RegexNodeKind.ExpressionConditional => ReduceTestgroup(), RegexNodeKind.BackreferenceConditional => ReduceTestref(), _ => this, }; } /// <summary>Remove an unnecessary Concatenation or Alternation node</summary> /// <remarks> /// Simple optimization for a concatenation or alternation: /// - if the node has only one child, use it instead /// - if the node has zero children, turn it into an empty with Nothing for an alternation or Empty for a concatenation /// </remarks> private RegexNode ReplaceNodeIfUnnecessary() { Debug.Assert(Kind is RegexNodeKind.Alternate or RegexNodeKind.Concatenate); return ChildCount() switch { 0 => new RegexNode(Kind == RegexNodeKind.Alternate ? RegexNodeKind.Nothing : RegexNodeKind.Empty, Options), 1 => Child(0), _ => this, }; } /// <summary>Remove all non-capturing groups.</summary> /// <remark> /// Simple optimization: once parsed into a tree, non-capturing groups /// serve no function, so strip them out. /// e.g. (?:(?:(?:abc))) => abc /// </remark> private RegexNode ReduceGroup() { Debug.Assert(Kind == RegexNodeKind.Group); RegexNode u = this; while (u.Kind == RegexNodeKind.Group) { Debug.Assert(u.ChildCount() == 1); u = u.Child(0); } return u; } /// <summary> /// Remove unnecessary atomic nodes, and make appropriate descendents of the atomic node themselves atomic. /// </summary> /// <remarks> /// e.g. (?>(?>(?>a*))) => (?>a*) /// e.g. (?>(abc*)*) => (?>(abc(?>c*))*) /// </remarks> private RegexNode ReduceAtomic() { // RegexOptions.NonBacktracking doesn't support atomic groups, so when that option // is set we don't want to create atomic groups where they weren't explicitly authored. if ((Options & RegexOptions.NonBacktracking) != 0) { return this; } Debug.Assert(Kind == RegexNodeKind.Atomic); Debug.Assert(ChildCount() == 1); RegexNode atomic = this; RegexNode child = Child(0); while (child.Kind == RegexNodeKind.Atomic) { atomic = child; child = atomic.Child(0); } switch (child.Kind) { // If the child is empty/nothing, there's nothing to be made atomic so the Atomic // node can simply be removed. case RegexNodeKind.Empty: case RegexNodeKind.Nothing: return child; // If the child is already atomic, we can just remove the atomic node. case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Setloopatomic: return child; // If an atomic subexpression contains only a {one/notone/set}{loop/lazy}, // change it to be an {one/notone/set}loopatomic and remove the atomic node. 
case RegexNodeKind.Oneloop: case RegexNodeKind.Notoneloop: case RegexNodeKind.Setloop: case RegexNodeKind.Onelazy: case RegexNodeKind.Notonelazy: case RegexNodeKind.Setlazy: child.MakeLoopAtomic(); return child; // Alternations have a variety of possible optimizations that can be applied // iff they're atomic. case RegexNodeKind.Alternate: if ((Options & RegexOptions.RightToLeft) == 0) { List<RegexNode>? branches = child.Children as List<RegexNode>; Debug.Assert(branches is not null && branches.Count != 0); // If an alternation is atomic and its first branch is Empty, the whole thing // is a nop, as Empty will match everything trivially, and no backtracking // into the node will be performed, making the remaining branches irrelevant. if (branches[0].Kind == RegexNodeKind.Empty) { return new RegexNode(RegexNodeKind.Empty, child.Options); } // Similarly, we can trim off any branches after an Empty, as they'll never be used. // An Empty will match anything, and thus branches after that would only be used // if we backtracked into it and advanced passed the Empty after trying the Empty... // but if the alternation is atomic, such backtracking won't happen. for (int i = 1; i < branches.Count - 1; i++) { if (branches[i].Kind == RegexNodeKind.Empty) { branches.RemoveRange(i + 1, branches.Count - (i + 1)); break; } } // If an alternation is atomic, we won't ever backtrack back into it, which // means order matters but not repetition. With backtracking, it would be incorrect // to convert an expression like "hi|there|hello" into "hi|hello|there", as doing // so could then change the order of results if we matched "hi" and then failed // based on what came after it, and both "hello" and "there" could be successful // with what came later. But without backtracking, we can reorder "hi|there|hello" // to instead be "hi|hello|there", as "hello" and "there" can't match the same text, // and once this atomic alternation has matched, we won't try another branch. This // reordering is valuable as it then enables further optimizations, e.g. // "hi|there|hello" => "hi|hello|there" => "h(?:i|ello)|there", which means we only // need to check the 'h' once in case it's not an 'h', and it's easier to employ different // code gen that, for example, switches on first character of the branches, enabling faster // choice of branch without always having to walk through each. bool reordered = false; for (int start = 0; start < branches.Count; start++) { // Get the node that may start our range. If it's a one, multi, or concat of those, proceed. RegexNode startNode = branches[start]; if (startNode.FindBranchOneOrMultiStart() is null) { continue; } // Find the contiguous range of nodes from this point that are similarly one, multi, or concat of those. int endExclusive = start + 1; while (endExclusive < branches.Count && branches[endExclusive].FindBranchOneOrMultiStart() is not null) { endExclusive++; } // If there's at least 3, there may be something to reorder (we won't reorder anything // before the starting position, and so only 2 items is considered ordered). if (endExclusive - start >= 3) { int compare = start; while (compare < endExclusive) { // Get the starting character char c = branches[compare].FindBranchOneOrMultiStart()!.FirstCharOfOneOrMulti(); // Move compare to point to the last branch that has the same starting value. 
while (compare < endExclusive && branches[compare].FindBranchOneOrMultiStart()!.FirstCharOfOneOrMulti() == c) { compare++; } // Compare now points to the first node that doesn't match the starting node. // If we've walked off our range, there's nothing left to reorder. if (compare < endExclusive) { // There may be something to reorder. See if there are any other nodes that begin with the same character. for (int next = compare + 1; next < endExclusive; next++) { RegexNode nextChild = branches[next]; if (nextChild.FindBranchOneOrMultiStart()!.FirstCharOfOneOrMulti() == c) { branches.RemoveAt(next); branches.Insert(compare++, nextChild); reordered = true; } } } } } // Move to the end of the range we've now explored. endExclusive is not a viable // starting position either, and the start++ for the loop will thus take us to // the next potential place to start a range. start = endExclusive; } // If anything was reordered, there may be new optimization opportunities inside // of the alternation, so reduce it again. if (reordered) { atomic.ReplaceChild(0, child); child = atomic.Child(0); } } goto default; // For everything else, try to reduce ending backtracking of the last contained expression. default: child.EliminateEndingBacktracking(); return atomic; } } /// <summary>Combine nested loops where applicable.</summary> /// <remarks> /// Nested repeaters just get multiplied with each other if they're not too lumpy. /// Other optimizations may have also resulted in {Lazy}loops directly containing /// sets, ones, and notones, in which case they can be transformed into the corresponding /// individual looping constructs. /// </remarks> private RegexNode ReduceLoops() { Debug.Assert(Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop); RegexNode u = this; RegexNodeKind kind = Kind; int min = M; int max = N; while (u.ChildCount() > 0) { RegexNode child = u.Child(0); // multiply reps of the same type only if (child.Kind != kind) { bool valid = false; if (kind == RegexNodeKind.Loop) { switch (child.Kind) { case RegexNodeKind.Oneloop: case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notoneloop: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Setloop: case RegexNodeKind.Setloopatomic: valid = true; break; } } else // type == Lazyloop { switch (child.Kind) { case RegexNodeKind.Onelazy: case RegexNodeKind.Notonelazy: case RegexNodeKind.Setlazy: valid = true; break; } } if (!valid) { break; } } // child can be too lumpy to blur, e.g., (a {100,105}) {3} or (a {2,})? // [but things like (a {2,})+ are not too lumpy...] if (u.M == 0 && child.M > 1 || child.N < child.M * 2) { break; } u = child; if (u.M > 0) { u.M = min = ((int.MaxValue - 1) / u.M < min) ? int.MaxValue : u.M * min; } if (u.N > 0) { u.N = max = ((int.MaxValue - 1) / u.N < max) ? int.MaxValue : u.N * max; } } if (min == int.MaxValue) { return new RegexNode(RegexNodeKind.Nothing, Options); } // If the Loop or Lazyloop now only has one child node and its a Set, One, or Notone, // reduce to just Setloop/lazy, Oneloop/lazy, or Notoneloop/lazy. The parser will // generally have only produced the latter, but other reductions could have exposed // this. if (u.ChildCount() == 1) { RegexNode child = u.Child(0); switch (child.Kind) { case RegexNodeKind.One: case RegexNodeKind.Notone: case RegexNodeKind.Set: child.MakeRep(u.Kind == RegexNodeKind.Lazyloop ? 
RegexNodeKind.Onelazy : RegexNodeKind.Oneloop, u.M, u.N); u = child; break; } } return u; } /// <summary> /// Reduces set-related nodes to simpler one-related and notone-related nodes, where applicable. /// </summary> /// <remarks> /// e.g. /// [a] => a /// [a]* => a* /// [a]*? => a*? /// (?>[a]*) => (?>a*) /// [^a] => ^a /// []* => Nothing /// </remarks> private RegexNode ReduceSet() { // Extract empty-set, one, and not-one case as special Debug.Assert(Kind is RegexNodeKind.Set or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy); Debug.Assert(!string.IsNullOrEmpty(Str)); if (RegexCharClass.IsEmpty(Str)) { Kind = RegexNodeKind.Nothing; Str = null; } else if (RegexCharClass.IsSingleton(Str)) { Ch = RegexCharClass.SingletonChar(Str); Str = null; Kind = Kind == RegexNodeKind.Set ? RegexNodeKind.One : Kind == RegexNodeKind.Setloop ? RegexNodeKind.Oneloop : Kind == RegexNodeKind.Setloopatomic ? RegexNodeKind.Oneloopatomic : RegexNodeKind.Onelazy; } else if (RegexCharClass.IsSingletonInverse(Str)) { Ch = RegexCharClass.SingletonChar(Str); Str = null; Kind = Kind == RegexNodeKind.Set ? RegexNodeKind.Notone : Kind == RegexNodeKind.Setloop ? RegexNodeKind.Notoneloop : Kind == RegexNodeKind.Setloopatomic ? RegexNodeKind.Notoneloopatomic : RegexNodeKind.Notonelazy; } return this; } /// <summary>Optimize an alternation.</summary> private RegexNode ReduceAlternation() { Debug.Assert(Kind == RegexNodeKind.Alternate); switch (ChildCount()) { case 0: return new RegexNode(RegexNodeKind.Nothing, Options); case 1: return Child(0); default: ReduceSingleLetterAndNestedAlternations(); RegexNode node = ReplaceNodeIfUnnecessary(); if (node.Kind == RegexNodeKind.Alternate) { node = ExtractCommonPrefixText(node); if (node.Kind == RegexNodeKind.Alternate) { node = ExtractCommonPrefixOneNotoneSet(node); if (node.Kind == RegexNodeKind.Alternate) { node = RemoveRedundantEmptiesAndNothings(node); } } } return node; } // This function performs two optimizations: // - Single-letter alternations can be replaced by faster set specifications // e.g. "a|b|c|def|g|h" -> "[a-c]|def|[gh]" // - Nested alternations with no intervening operators can be flattened: // e.g. "apple|(?:orange|pear)|grape" -> "apple|orange|pear|grape" void ReduceSingleLetterAndNestedAlternations() { bool wasLastSet = false; bool lastNodeCannotMerge = false; RegexOptions optionsLast = 0; RegexOptions optionsAt; int i; int j; RegexNode at; RegexNode prev; List<RegexNode> children = (List<RegexNode>)Children!; for (i = 0, j = 0; i < children.Count; i++, j++) { at = children[i]; if (j < i) children[j] = at; while (true) { if (at.Kind == RegexNodeKind.Alternate) { if (at.Children is List<RegexNode> atChildren) { for (int k = 0; k < atChildren.Count; k++) { atChildren[k].Parent = this; } children.InsertRange(i + 1, atChildren); } else { RegexNode atChild = (RegexNode)at.Children!; atChild.Parent = this; children.Insert(i + 1, atChild); } j--; } else if (at.Kind is RegexNodeKind.Set or RegexNodeKind.One) { // Cannot merge sets if L or I options differ, or if either are negated. 
optionsAt = at.Options & (RegexOptions.RightToLeft | RegexOptions.IgnoreCase); if (at.Kind == RegexNodeKind.Set) { if (!wasLastSet || optionsLast != optionsAt || lastNodeCannotMerge || !RegexCharClass.IsMergeable(at.Str!)) { wasLastSet = true; lastNodeCannotMerge = !RegexCharClass.IsMergeable(at.Str!); optionsLast = optionsAt; break; } } else if (!wasLastSet || optionsLast != optionsAt || lastNodeCannotMerge) { wasLastSet = true; lastNodeCannotMerge = false; optionsLast = optionsAt; break; } // The last node was a Set or a One, we're a Set or One and our options are the same. // Merge the two nodes. j--; prev = children[j]; RegexCharClass prevCharClass; if (prev.Kind == RegexNodeKind.One) { prevCharClass = new RegexCharClass(); prevCharClass.AddChar(prev.Ch); } else { prevCharClass = RegexCharClass.Parse(prev.Str!); } if (at.Kind == RegexNodeKind.One) { prevCharClass.AddChar(at.Ch); } else { RegexCharClass atCharClass = RegexCharClass.Parse(at.Str!); prevCharClass.AddCharClass(atCharClass); } prev.Kind = RegexNodeKind.Set; prev.Str = prevCharClass.ToStringClass(Options); if ((prev.Options & RegexOptions.IgnoreCase) != 0 && RegexCharClass.MakeCaseSensitiveIfPossible(prev.Str, RegexParser.GetTargetCulture(prev.Options)) is string newSetString) { prev.Str = newSetString; prev.Options &= ~RegexOptions.IgnoreCase; } } else if (at.Kind == RegexNodeKind.Nothing) { j--; } else { wasLastSet = false; lastNodeCannotMerge = false; } break; } } if (j < i) { children.RemoveRange(j, i - j); } } // This function optimizes out prefix nodes from alternation branches that are // the same across multiple contiguous branches. // e.g. \w12|\d34|\d56|\w78|\w90 => \w12|\d(?:34|56)|\w(?:78|90) static RegexNode ExtractCommonPrefixOneNotoneSet(RegexNode alternation) { Debug.Assert(alternation.Kind == RegexNodeKind.Alternate); Debug.Assert(alternation.Children is List<RegexNode> { Count: >= 2 }); var children = (List<RegexNode>)alternation.Children; // Only process left-to-right prefixes. if ((alternation.Options & RegexOptions.RightToLeft) != 0) { return alternation; } // Only handle the case where each branch is a concatenation foreach (RegexNode child in children) { if (child.Kind != RegexNodeKind.Concatenate || child.ChildCount() < 2) { return alternation; } } for (int startingIndex = 0; startingIndex < children.Count - 1; startingIndex++) { Debug.Assert(children[startingIndex].Children is List<RegexNode> { Count: >= 2 }); // Only handle the case where each branch begins with the same One, Notone, or Set (individual or loop). // Note that while we can do this for individual characters, fixed length loops, and atomic loops, doing // it for non-atomic variable length loops could change behavior as each branch could otherwise have a // different number of characters consumed by the loop based on what's after it. 
RegexNode required = children[startingIndex].Child(0); switch (required.Kind) { case RegexNodeKind.One or RegexNodeKind.Notone or RegexNodeKind.Set: case RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloopatomic: case RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop or RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy when required.M == required.N: break; default: continue; } // Only handle the case where each branch begins with the exact same node value int endingIndex = startingIndex + 1; for (; endingIndex < children.Count; endingIndex++) { RegexNode other = children[endingIndex].Child(0); if (required.Kind != other.Kind || required.Options != other.Options || required.M != other.M || required.N != other.N || required.Ch != other.Ch || required.Str != other.Str) { break; } } if (endingIndex - startingIndex <= 1) { // Nothing to extract from this starting index. continue; } // Remove the prefix node from every branch, adding it to a new alternation var newAlternate = new RegexNode(RegexNodeKind.Alternate, alternation.Options); for (int i = startingIndex; i < endingIndex; i++) { ((List<RegexNode>)children[i].Children!).RemoveAt(0); newAlternate.AddChild(children[i]); } // If this alternation is wrapped as atomic, we need to do the same for the new alternation. if (alternation.Parent is RegexNode { Kind: RegexNodeKind.Atomic } parent) { var atomic = new RegexNode(RegexNodeKind.Atomic, alternation.Options); atomic.AddChild(newAlternate); newAlternate = atomic; } // Now create a concatenation of the prefix node with the new alternation for the combined // branches, and replace all of the branches in this alternation with that new concatenation. var newConcat = new RegexNode(RegexNodeKind.Concatenate, alternation.Options); newConcat.AddChild(required); newConcat.AddChild(newAlternate); alternation.ReplaceChild(startingIndex, newConcat); children.RemoveRange(startingIndex + 1, endingIndex - startingIndex - 1); } return alternation.ReplaceNodeIfUnnecessary(); } // Removes unnecessary Empty and Nothing nodes from the alternation. A Nothing will never // match, so it can be removed entirely, and an Empty can be removed if there's a previous // Empty in the alternation: it's an extreme case of just having a repeated branch in an // alternation, and while we don't check for all duplicates, checking for empty is easy. static RegexNode RemoveRedundantEmptiesAndNothings(RegexNode node) { Debug.Assert(node.Kind == RegexNodeKind.Alternate); Debug.Assert(node.ChildCount() >= 2); var children = (List<RegexNode>)node.Children!; int i = 0, j = 0; bool seenEmpty = false; while (i < children.Count) { RegexNode child = children[i]; switch (child.Kind) { case RegexNodeKind.Empty when !seenEmpty: seenEmpty = true; goto default; case RegexNodeKind.Empty: case RegexNodeKind.Nothing: i++; break; default: children[j] = children[i]; i++; j++; break; } } children.RemoveRange(j, children.Count - j); return node.ReplaceNodeIfUnnecessary(); } // Analyzes all the branches of the alternation for text that's identical at the beginning // of every branch. That text is then pulled out into its own one or multi node in a // concatenation with the alternation (whose branches are updated to remove that prefix). // This is valuable for a few reasons. One, it exposes potentially more text to the // expression prefix analyzer used to influence FindFirstChar. Second, it exposes more // potential alternation optimizations, e.g. 
if the same prefix is followed in two branches // by sets that can be merged. Third, it reduces the amount of duplicated comparisons required // if we end up backtracking into subsequent branches. // e.g. abc|ade => a(?bc|de) static RegexNode ExtractCommonPrefixText(RegexNode alternation) { Debug.Assert(alternation.Kind == RegexNodeKind.Alternate); Debug.Assert(alternation.Children is List<RegexNode> { Count: >= 2 }); var children = (List<RegexNode>)alternation.Children; // To keep things relatively simple, we currently only handle: // - Left to right (e.g. we don't process alternations in lookbehinds) // - Branches that are one or multi nodes, or that are concatenations beginning with one or multi nodes. // - All branches having the same options. // Only extract left-to-right prefixes. if ((alternation.Options & RegexOptions.RightToLeft) != 0) { return alternation; } Span<char> scratchChar = stackalloc char[1]; ReadOnlySpan<char> startingSpan = stackalloc char[0]; for (int startingIndex = 0; startingIndex < children.Count - 1; startingIndex++) { // Process the first branch to get the maximum possible common string. RegexNode? startingNode = children[startingIndex].FindBranchOneOrMultiStart(); if (startingNode is null) { return alternation; } RegexOptions startingNodeOptions = startingNode.Options; startingSpan = startingNode.Str.AsSpan(); if (startingNode.Kind == RegexNodeKind.One) { scratchChar[0] = startingNode.Ch; startingSpan = scratchChar; } Debug.Assert(startingSpan.Length > 0); // Now compare the rest of the branches against it. int endingIndex = startingIndex + 1; for (; endingIndex < children.Count; endingIndex++) { // Get the starting node of the next branch. startingNode = children[endingIndex].FindBranchOneOrMultiStart(); if (startingNode is null || startingNode.Options != startingNodeOptions) { break; } // See if the new branch's prefix has a shared prefix with the current one. // If it does, shorten to that; if it doesn't, bail. if (startingNode.Kind == RegexNodeKind.One) { if (startingSpan[0] != startingNode.Ch) { break; } if (startingSpan.Length != 1) { startingSpan = startingSpan.Slice(0, 1); } } else { Debug.Assert(startingNode.Kind == RegexNodeKind.Multi); Debug.Assert(startingNode.Str!.Length > 0); int minLength = Math.Min(startingSpan.Length, startingNode.Str.Length); int c = 0; while (c < minLength && startingSpan[c] == startingNode.Str[c]) c++; if (c == 0) { break; } startingSpan = startingSpan.Slice(0, c); } } // When we get here, we have a starting string prefix shared by all branches // in the range [startingIndex, endingIndex). if (endingIndex - startingIndex <= 1) { // There's nothing to consolidate for this starting node. continue; } // We should be able to consolidate something for the nodes in the range [startingIndex, endingIndex). Debug.Assert(startingSpan.Length > 0); // Create a new node of the form: // Concatenation(prefix, Alternation(each | node | with | prefix | removed)) // that replaces all these branches in this alternation. var prefix = startingSpan.Length == 1 ? new RegexNode(RegexNodeKind.One, startingNodeOptions, startingSpan[0]) : new RegexNode(RegexNodeKind.Multi, startingNodeOptions, startingSpan.ToString()); var newAlternate = new RegexNode(RegexNodeKind.Alternate, startingNodeOptions); for (int i = startingIndex; i < endingIndex; i++) { RegexNode branch = children[i]; ProcessOneOrMulti(branch.Kind == RegexNodeKind.Concatenate ? 
branch.Child(0) : branch, startingSpan); branch = branch.Reduce(); newAlternate.AddChild(branch); // Remove the starting text from the one or multi node. This may end up changing // the type of the node to be Empty if the starting text matches the node's full value. static void ProcessOneOrMulti(RegexNode node, ReadOnlySpan<char> startingSpan) { if (node.Kind == RegexNodeKind.One) { Debug.Assert(startingSpan.Length == 1); Debug.Assert(startingSpan[0] == node.Ch); node.Kind = RegexNodeKind.Empty; node.Ch = '\0'; } else { Debug.Assert(node.Kind == RegexNodeKind.Multi); Debug.Assert(node.Str.AsSpan().StartsWith(startingSpan, StringComparison.Ordinal)); if (node.Str!.Length == startingSpan.Length) { node.Kind = RegexNodeKind.Empty; node.Str = null; } else if (node.Str.Length - 1 == startingSpan.Length) { node.Kind = RegexNodeKind.One; node.Ch = node.Str[node.Str.Length - 1]; node.Str = null; } else { node.Str = node.Str.Substring(startingSpan.Length); } } } } if (alternation.Parent is RegexNode parent && parent.Kind == RegexNodeKind.Atomic) { var atomic = new RegexNode(RegexNodeKind.Atomic, startingNodeOptions); atomic.AddChild(newAlternate); newAlternate = atomic; } var newConcat = new RegexNode(RegexNodeKind.Concatenate, startingNodeOptions); newConcat.AddChild(prefix); newConcat.AddChild(newAlternate); alternation.ReplaceChild(startingIndex, newConcat); children.RemoveRange(startingIndex + 1, endingIndex - startingIndex - 1); } return alternation.ChildCount() == 1 ? alternation.Child(0) : alternation; } } /// <summary> /// Finds the starting one or multi of the branch, if it has one; otherwise, returns null. /// For simplicity, this only considers branches that are One or Multi, or a Concatenation /// beginning with a One or Multi. We don't traverse more than one level to avoid the /// complication of then having to later update that hierarchy when removing the prefix, /// but it could be done in the future if proven beneficial enough. /// </summary> public RegexNode? FindBranchOneOrMultiStart() { RegexNode branch = Kind == RegexNodeKind.Concatenate ? Child(0) : this; return branch.Kind is RegexNodeKind.One or RegexNodeKind.Multi ? branch : null; } /// <summary>Same as <see cref="FindBranchOneOrMultiStart"/> but also for Sets.</summary> public RegexNode? FindBranchOneMultiOrSetStart() { RegexNode branch = Kind == RegexNodeKind.Concatenate ? Child(0) : this; return branch.Kind is RegexNodeKind.One or RegexNodeKind.Multi or RegexNodeKind.Set ? branch : null; } /// <summary>Gets the character that begins a One or Multi.</summary> public char FirstCharOfOneOrMulti() { Debug.Assert(Kind is RegexNodeKind.One or RegexNodeKind.Multi); Debug.Assert((Options & RegexOptions.RightToLeft) == 0); return Kind == RegexNodeKind.One ? Ch : Str![0]; } /// <summary>Finds the guaranteed beginning literal(s) of the node, or null if none exists.</summary> public (char Char, string? String, string? SetChars)? FindStartingLiteral(int maxSetCharacters = 5) // 5 is max optimized by IndexOfAny today { Debug.Assert(maxSetCharacters >= 0 && maxSetCharacters <= 128, $"{nameof(maxSetCharacters)} == {maxSetCharacters} should be small enough to be stack allocated."); RegexNode? 
node = this; while (true) { if (node is not null && (node.Options & RegexOptions.RightToLeft) == 0) { switch (node.Kind) { case RegexNodeKind.One: case RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy when node.M > 0: if ((node.Options & RegexOptions.IgnoreCase) == 0 || !RegexCharClass.ParticipatesInCaseConversion(node.Ch)) { return (node.Ch, null, null); } break; case RegexNodeKind.Multi: if ((node.Options & RegexOptions.IgnoreCase) == 0 || !RegexCharClass.ParticipatesInCaseConversion(node.Str.AsSpan())) { return ('\0', node.Str, null); } break; case RegexNodeKind.Set: case RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy when node.M > 0: Span<char> setChars = stackalloc char[maxSetCharacters]; int numChars; if (!RegexCharClass.IsNegated(node.Str!) && (numChars = RegexCharClass.GetSetChars(node.Str!, setChars)) != 0) { setChars = setChars.Slice(0, numChars); if ((node.Options & RegexOptions.IgnoreCase) == 0 || !RegexCharClass.ParticipatesInCaseConversion(setChars)) { return ('\0', null, setChars.ToString()); } } break; case RegexNodeKind.Atomic: case RegexNodeKind.Concatenate: case RegexNodeKind.Capture: case RegexNodeKind.Group: case RegexNodeKind.Loop or RegexNodeKind.Lazyloop when node.M > 0: case RegexNodeKind.PositiveLookaround: node = node.Child(0); continue; } } return null; } } /// <summary> /// Optimizes a concatenation by coalescing adjacent characters and strings, /// coalescing adjacent loops, converting loops to be atomic where applicable, /// and removing the concatenation itself if it's unnecessary. /// </summary> private RegexNode ReduceConcatenation() { Debug.Assert(Kind == RegexNodeKind.Concatenate); // If the concat node has zero or only one child, get rid of the concat. switch (ChildCount()) { case 0: return new RegexNode(RegexNodeKind.Empty, Options); case 1: return Child(0); } // Coalesce adjacent loops. This helps to minimize work done by the interpreter, minimize code gen, // and also help to reduce catastrophic backtracking. ReduceConcatenationWithAdjacentLoops(); // Coalesce adjacent characters/strings. This is done after the adjacent loop coalescing so that // a One adjacent to both a Multi and a Loop prefers being folded into the Loop rather than into // the Multi. Doing so helps with auto-atomicity when it's later applied. ReduceConcatenationWithAdjacentStrings(); // If the concatenation is now empty, return an empty node, or if it's got a single child, return that child. // Otherwise, return this. return ReplaceNodeIfUnnecessary(); } /// <summary> /// Combine adjacent characters/strings. /// e.g. 
(?:abc)(?:def) -> abcdef /// </summary> private void ReduceConcatenationWithAdjacentStrings() { Debug.Assert(Kind == RegexNodeKind.Concatenate); Debug.Assert(Children is List<RegexNode>); bool wasLastString = false; RegexOptions optionsLast = 0; int i, j; List<RegexNode> children = (List<RegexNode>)Children!; for (i = 0, j = 0; i < children.Count; i++, j++) { RegexNode at = children[i]; if (j < i) { children[j] = at; } if (at.Kind == RegexNodeKind.Concatenate && ((at.Options & RegexOptions.RightToLeft) == (Options & RegexOptions.RightToLeft))) { if (at.Children is List<RegexNode> atChildren) { for (int k = 0; k < atChildren.Count; k++) { atChildren[k].Parent = this; } children.InsertRange(i + 1, atChildren); } else { RegexNode atChild = (RegexNode)at.Children!; atChild.Parent = this; children.Insert(i + 1, atChild); } j--; } else if (at.Kind is RegexNodeKind.Multi or RegexNodeKind.One) { // Cannot merge strings if L or I options differ RegexOptions optionsAt = at.Options & (RegexOptions.RightToLeft | RegexOptions.IgnoreCase); if (!wasLastString || optionsLast != optionsAt) { wasLastString = true; optionsLast = optionsAt; continue; } RegexNode prev = children[--j]; if (prev.Kind == RegexNodeKind.One) { prev.Kind = RegexNodeKind.Multi; prev.Str = prev.Ch.ToString(); } if ((optionsAt & RegexOptions.RightToLeft) == 0) { prev.Str = (at.Kind == RegexNodeKind.One) ? $"{prev.Str}{at.Ch}" : prev.Str + at.Str; } else { prev.Str = (at.Kind == RegexNodeKind.One) ? $"{at.Ch}{prev.Str}" : at.Str + prev.Str; } } else if (at.Kind == RegexNodeKind.Empty) { j--; } else { wasLastString = false; } } if (j < i) { children.RemoveRange(j, i - j); } } /// <summary> /// Combine adjacent loops. /// e.g. a*a*a* => a* /// e.g. a+ab => a{2,}b /// </summary> private void ReduceConcatenationWithAdjacentLoops() { Debug.Assert(Kind == RegexNodeKind.Concatenate); Debug.Assert(Children is List<RegexNode>); var children = (List<RegexNode>)Children!; int current = 0, next = 1, nextSave = 1; while (next < children.Count) { RegexNode currentNode = children[current]; RegexNode nextNode = children[next]; if (currentNode.Options == nextNode.Options) { static bool CanCombineCounts(int nodeMin, int nodeMax, int nextMin, int nextMax) { // We shouldn't have an infinite minimum; bail if we find one. Also check for the // degenerate case where we'd make the min overflow or go infinite when it wasn't already. if (nodeMin == int.MaxValue || nextMin == int.MaxValue || (uint)nodeMin + (uint)nextMin >= int.MaxValue) { return false; } // Similar overflow / go infinite check for max (which can be infinite). if (nodeMax != int.MaxValue && nextMax != int.MaxValue && (uint)nodeMax + (uint)nextMax >= int.MaxValue) { return false; } return true; } switch (currentNode.Kind) { // Coalescing a loop with its same type case RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Notonelazy when nextNode.Kind == currentNode.Kind && currentNode.Ch == nextNode.Ch: case RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy when nextNode.Kind == currentNode.Kind && currentNode.Str == nextNode.Str: if (CanCombineCounts(currentNode.M, currentNode.N, nextNode.M, nextNode.N)) { currentNode.M += nextNode.M; if (currentNode.N != int.MaxValue) { currentNode.N = nextNode.N == int.MaxValue ? 
int.MaxValue : currentNode.N + nextNode.N; } next++; continue; } break; // Coalescing a loop with an additional item of the same type case RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy when nextNode.Kind == RegexNodeKind.One && currentNode.Ch == nextNode.Ch: case RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Notonelazy when nextNode.Kind == RegexNodeKind.Notone && currentNode.Ch == nextNode.Ch: case RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy when nextNode.Kind == RegexNodeKind.Set && currentNode.Str == nextNode.Str: if (CanCombineCounts(currentNode.M, currentNode.N, 1, 1)) { currentNode.M++; if (currentNode.N != int.MaxValue) { currentNode.N++; } next++; continue; } break; // Coalescing a loop with a subsequent string case RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy when nextNode.Kind == RegexNodeKind.Multi && currentNode.Ch == nextNode.Str![0]: { // Determine how many of the multi's characters can be combined. // We already checked for the first, so we know it's at least one. int matchingCharsInMulti = 1; while (matchingCharsInMulti < nextNode.Str.Length && currentNode.Ch == nextNode.Str[matchingCharsInMulti]) { matchingCharsInMulti++; } if (CanCombineCounts(currentNode.M, currentNode.N, matchingCharsInMulti, matchingCharsInMulti)) { // Update the loop's bounds to include those characters from the multi currentNode.M += matchingCharsInMulti; if (currentNode.N != int.MaxValue) { currentNode.N += matchingCharsInMulti; } // If it was the full multi, skip/remove the multi and continue processing this loop. if (nextNode.Str.Length == matchingCharsInMulti) { next++; continue; } // Otherwise, trim the characters from the multiple that were absorbed into the loop. // If it now only has a single character, it becomes a One. Debug.Assert(matchingCharsInMulti < nextNode.Str.Length); if (nextNode.Str.Length - matchingCharsInMulti == 1) { nextNode.Kind = RegexNodeKind.One; nextNode.Ch = nextNode.Str[nextNode.Str.Length - 1]; nextNode.Str = null; } else { nextNode.Str = nextNode.Str.Substring(matchingCharsInMulti); } } } break; // NOTE: We could add support for coalescing a string with a subsequent loop, but the benefits of that // are limited. Pulling a subsequent string's prefix back into the loop helps with making the loop atomic, // but if the loop is after the string, pulling the suffix of the string forward into the loop may actually // be a deoptimization as those characters could end up matching more slowly as part of loop matching. // Coalescing an individual item with a loop. case RegexNodeKind.One when (nextNode.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy) && currentNode.Ch == nextNode.Ch: case RegexNodeKind.Notone when (nextNode.Kind is RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Notonelazy) && currentNode.Ch == nextNode.Ch: case RegexNodeKind.Set when (nextNode.Kind is RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy) && currentNode.Str == nextNode.Str: if (CanCombineCounts(1, 1, nextNode.M, nextNode.N)) { currentNode.Kind = nextNode.Kind; currentNode.M = nextNode.M + 1; currentNode.N = nextNode.N == int.MaxValue ? int.MaxValue : nextNode.N + 1; next++; continue; } break; // Coalescing an individual item with another individual item. // We don't coalesce adjacent One nodes into a Oneloop as we'd rather they be joined into a Multi. 
case RegexNodeKind.Notone when nextNode.Kind == currentNode.Kind && currentNode.Ch == nextNode.Ch: case RegexNodeKind.Set when nextNode.Kind == RegexNodeKind.Set && currentNode.Str == nextNode.Str: currentNode.MakeRep(RegexNodeKind.Oneloop, 2, 2); next++; continue; } } children[nextSave++] = children[next]; current = next; next++; } if (nextSave < children.Count) { children.RemoveRange(nextSave, children.Count - nextSave); } } /// <summary> /// Finds {one/notone/set}loop nodes in the concatenation that can be automatically upgraded /// to {one/notone/set}loopatomic nodes. Such changes avoid potential useless backtracking. /// e.g. A*B (where sets A and B don't overlap) => (?>A*)B. /// </summary> private void FindAndMakeLoopsAtomic() { Debug.Assert((Options & RegexOptions.NonBacktracking) == 0, "Atomic groups aren't supported and don't help performance with NonBacktracking"); if (!StackHelper.TryEnsureSufficientExecutionStack()) { // If we're too deep on the stack, give up optimizing further. return; } if ((Options & RegexOptions.RightToLeft) != 0) { // RTL is so rare, we don't need to spend additional time/code optimizing for it. return; } // For all node types that have children, recur into each of those children. int childCount = ChildCount(); if (childCount != 0) { for (int i = 0; i < childCount; i++) { Child(i).FindAndMakeLoopsAtomic(); } } // If this isn't a concatenation, nothing more to do. if (Kind is not RegexNodeKind.Concatenate) { return; } // This is a concatenation. Iterate through each pair of nodes in the concatenation seeing whether we can // make the first node (or its right-most child) atomic based on the second node (or its left-most child). Debug.Assert(Children is List<RegexNode>); var children = (List<RegexNode>)Children; for (int i = 0; i < childCount - 1; i++) { ProcessNode(children[i], children[i + 1]); static void ProcessNode(RegexNode node, RegexNode subsequent) { if (!StackHelper.TryEnsureSufficientExecutionStack()) { // If we can't recur further, just stop optimizing. return; } // Skip down the node past irrelevant nodes. while (true) { // We can always recur into captures and into the last node of concatenations. if (node.Kind is RegexNodeKind.Capture or RegexNodeKind.Concatenate) { node = node.Child(node.ChildCount() - 1); continue; } // For loops with at least one guaranteed iteration, we can recur into them, but // we need to be careful not to just always do so; the ending node of a loop can only // be made atomic if what comes after the loop but also the beginning of the loop are // compatible for the optimization. if (node.Kind == RegexNodeKind.Loop) { RegexNode? loopDescendent = node.FindLastExpressionInLoopForAutoAtomic(); if (loopDescendent != null) { node = loopDescendent; continue; } } // Can't skip any further. break; } // If the node can be changed to atomic based on what comes after it, do so. switch (node.Kind) { case RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop when CanBeMadeAtomic(node, subsequent, allowSubsequentIteration: true): node.MakeLoopAtomic(); break; case RegexNodeKind.Alternate or RegexNodeKind.BackreferenceConditional or RegexNodeKind.ExpressionConditional: // In the case of alternation, we can't change the alternation node itself // based on what comes after it (at least not with more complicated analysis // that factors in all branches together), but we can look at each individual // branch, and analyze ending loops in each branch individually to see if they // can be made atomic. 
Then if we do end up backtracking into the alternation, // we at least won't need to backtrack into that loop. The same is true for // conditionals, though we don't want to process the condition expression // itself, as it's already considered atomic and handled as part of ReduceTestgroup. { int alternateBranches = node.ChildCount(); for (int b = node.Kind == RegexNodeKind.ExpressionConditional ? 1 : 0; b < alternateBranches; b++) { ProcessNode(node.Child(b), subsequent); } } break; } } } } /// <summary> /// Recurs into the last expression of a loop node, looking to see if it can find a node /// that could be made atomic _assuming_ the conditions exist for it with the loop's ancestors. /// </summary> /// <returns>The found node that should be explored further for auto-atomicity; null if it doesn't exist.</returns> private RegexNode? FindLastExpressionInLoopForAutoAtomic() { RegexNode node = this; Debug.Assert(node.Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop); // Start by looking at the loop's sole child. node = node.Child(0); // Skip past captures. while (node.Kind == RegexNodeKind.Capture) { node = node.Child(0); } // If the loop's body is a concatenate, we can skip to its last child iff that // last child doesn't conflict with the first child, since this whole concatenation // could be repeated, such that the first node ends up following the last. For // example, in the expression (a+[def])*, the last child is [def] and the first is // a+, which can't possibly overlap with [def]. In contrast, if we had (a+[ade])*, // [ade] could potentially match the starting 'a'. if (node.Kind == RegexNodeKind.Concatenate) { int concatCount = node.ChildCount(); RegexNode lastConcatChild = node.Child(concatCount - 1); if (CanBeMadeAtomic(lastConcatChild, node.Child(0), allowSubsequentIteration: false)) { return lastConcatChild; } } // Otherwise, the loop has nothing that can participate in auto-atomicity. return null; } /// <summary>Optimizations for positive lookaheads/behinds.</summary> private RegexNode ReduceRequire() { Debug.Assert(Kind == RegexNodeKind.PositiveLookaround); Debug.Assert(ChildCount() == 1); // A positive lookaround is a zero-width atomic assertion. // As it's atomic, nothing will backtrack into it, and we can // eliminate any ending backtracking from it. EliminateEndingBacktracking(); // A positive lookaround wrapped around an empty is a nop, and can just // be made into an empty. A developer typically doesn't write this, but // rather it evolves due to optimizations resulting in empty. if (Child(0).Kind == RegexNodeKind.Empty) { Kind = RegexNodeKind.Empty; Children = null; } return this; } /// <summary>Optimizations for negative lookaheads/behinds.</summary> private RegexNode ReducePrevent() { Debug.Assert(Kind == RegexNodeKind.NegativeLookaround); Debug.Assert(ChildCount() == 1); // A negative lookaround wrapped around an empty child, i.e. (?!), is // sometimes used as a way to insert a guaranteed no-match into the expression. // We can reduce it to simply Nothing. if (Child(0).Kind == RegexNodeKind.Empty) { Kind = RegexNodeKind.Nothing; Children = null; } return this; } /// <summary>Optimizations for backreference conditionals.</summary> private RegexNode ReduceTestref() { Debug.Assert(Kind == RegexNodeKind.BackreferenceConditional); Debug.Assert(ChildCount() is 1 or 2); // This isn't so much an optimization as it is changing the tree for consistency. 
// We want all engines to be able to trust that every Testref will have two children, // even though it's optional in the syntax. If it's missing a "not matched" branch, // we add one that will match empty. if (ChildCount() == 1) { AddChild(new RegexNode(RegexNodeKind.Empty, Options)); } return this; } /// <summary>Optimizations for expression conditionals.</summary> private RegexNode ReduceTestgroup() { Debug.Assert(Kind == RegexNodeKind.ExpressionConditional); Debug.Assert(ChildCount() is 2 or 3); // This isn't so much an optimization as it is changing the tree for consistency. // We want all engines to be able to trust that every Testgroup will have three children, // even though it's optional in the syntax. If it's missing a "not matched" branch, // we add one that will match empty. if (ChildCount() == 2) { AddChild(new RegexNode(RegexNodeKind.Empty, Options)); } // It's common for the condition to be an explicit positive lookahead, as specifying // that eliminates any ambiguity in syntax as to whether the expression is to be matched // as an expression or to be a reference to a capture group. After parsing, however, // there's no ambiguity, and we can remove an extra level of positive lookahead, as the // engines need to treat the condition as a zero-width positive, atomic assertion regardless. RegexNode condition = Child(0); if (condition.Kind == RegexNodeKind.PositiveLookaround && (condition.Options & RegexOptions.RightToLeft) == 0) { ReplaceChild(0, condition.Child(0)); } // We can also eliminate any ending backtracking in the condition, as the condition // is considered to be a positive lookahead, which is an atomic zero-width assertion. condition = Child(0); condition.EliminateEndingBacktracking(); return this; } /// <summary> /// Determines whether node can be switched to an atomic loop. Subsequent is the node /// immediately after 'node'. /// </summary> private static bool CanBeMadeAtomic(RegexNode node, RegexNode subsequent, bool allowSubsequentIteration) { if (!StackHelper.TryEnsureSufficientExecutionStack()) { // If we can't recur further, just stop optimizing. return false; } // In most case, we'll simply check the node against whatever subsequent is. However, in case // subsequent ends up being a loop with a min bound of 0, we'll also need to evaluate the node // against whatever comes after subsequent. In that case, we'll walk the tree to find the // next subsequent, and we'll loop around against to perform the comparison again. while (true) { // Skip the successor down to the closest node that's guaranteed to follow it. int childCount; while ((childCount = subsequent.ChildCount()) > 0) { Debug.Assert(subsequent.Kind != RegexNodeKind.Group); switch (subsequent.Kind) { case RegexNodeKind.Concatenate: case RegexNodeKind.Capture: case RegexNodeKind.Atomic: case RegexNodeKind.PositiveLookaround when (subsequent.Options & RegexOptions.RightToLeft) == 0: // only lookaheads, not lookbehinds (represented as RTL PositiveLookaround nodes) case RegexNodeKind.Loop or RegexNodeKind.Lazyloop when subsequent.M > 0: subsequent = subsequent.Child(0); continue; } break; } // If the two nodes don't agree on options in any way, don't try to optimize them. // TODO: Remove this once https://github.com/dotnet/runtime/issues/61048 is implemented. if (node.Options != subsequent.Options) { return false; } // If the successor is an alternation, all of its children need to be evaluated, since any of them // could come after this node. If any of them fail the optimization, then the whole node fails. 
// This applies to expression conditionals as well, as long as they have both a yes and a no branch (if there's // only a yes branch, we'd need to also check whatever comes after the conditional). It doesn't apply to // backreference conditionals, as the condition itself is unknown statically and could overlap with the // loop being considered for atomicity. switch (subsequent.Kind) { case RegexNodeKind.Alternate: case RegexNodeKind.ExpressionConditional when childCount == 3: // condition, yes, and no branch for (int i = 0; i < childCount; i++) { if (!CanBeMadeAtomic(node, subsequent.Child(i), allowSubsequentIteration)) { return false; } } return true; } // If this node is a {one/notone/set}loop, see if it overlaps with its successor in the concatenation. // If it doesn't, then we can upgrade it to being a {one/notone/set}loopatomic. // Doing so avoids unnecessary backtracking. switch (node.Kind) { case RegexNodeKind.Oneloop: switch (subsequent.Kind) { case RegexNodeKind.One when node.Ch != subsequent.Ch: case RegexNodeKind.Notone when node.Ch == subsequent.Ch: case RegexNodeKind.Set when !RegexCharClass.CharInClass(node.Ch, subsequent.Str!): case RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic when subsequent.M > 0 && node.Ch != subsequent.Ch: case RegexNodeKind.Notonelazy or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic when subsequent.M > 0 && node.Ch == subsequent.Ch: case RegexNodeKind.Setlazy or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic when subsequent.M > 0 && !RegexCharClass.CharInClass(node.Ch, subsequent.Str!): case RegexNodeKind.Multi when node.Ch != subsequent.Str![0]: case RegexNodeKind.End: case RegexNodeKind.EndZ or RegexNodeKind.Eol when node.Ch != '\n': case RegexNodeKind.Boundary when RegexCharClass.IsBoundaryWordChar(node.Ch): case RegexNodeKind.NonBoundary when !RegexCharClass.IsBoundaryWordChar(node.Ch): case RegexNodeKind.ECMABoundary when RegexCharClass.IsECMAWordChar(node.Ch): case RegexNodeKind.NonECMABoundary when !RegexCharClass.IsECMAWordChar(node.Ch): return true; case RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic when subsequent.M == 0 && node.Ch != subsequent.Ch: case RegexNodeKind.Notonelazy or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic when subsequent.M == 0 && node.Ch == subsequent.Ch: case RegexNodeKind.Setlazy or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic when subsequent.M == 0 && !RegexCharClass.CharInClass(node.Ch, subsequent.Str!): // The loop can be made atomic based on this subsequent node, but we'll need to evaluate the next one as well. break; default: return false; } break; case RegexNodeKind.Notoneloop: switch (subsequent.Kind) { case RegexNodeKind.One when node.Ch == subsequent.Ch: case RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic when subsequent.M > 0 && node.Ch == subsequent.Ch: case RegexNodeKind.Multi when node.Ch == subsequent.Str![0]: case RegexNodeKind.End: return true; case RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic when subsequent.M == 0 && node.Ch == subsequent.Ch: // The loop can be made atomic based on this subsequent node, but we'll need to evaluate the next one as well. 
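                                // e.g. (illustrative) in a*b*c, when judging whether a* can be made atomic, b* doesn't
                                // overlap with 'a' but has a lower bound of 0, so we fall through and also compare a*
                                // against the 'c' that may follow b*.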
                                break;

                            default:
                                return false;
                        }
                        break;

                    case RegexNodeKind.Setloop:
                        switch (subsequent.Kind)
                        {
                            case RegexNodeKind.One when !RegexCharClass.CharInClass(subsequent.Ch, node.Str!):
                            case RegexNodeKind.Set when !RegexCharClass.MayOverlap(node.Str!, subsequent.Str!):
                            case RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic when subsequent.M > 0 && !RegexCharClass.CharInClass(subsequent.Ch, node.Str!):
                            case RegexNodeKind.Setlazy or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic when subsequent.M > 0 && !RegexCharClass.MayOverlap(node.Str!, subsequent.Str!):
                            case RegexNodeKind.Multi when !RegexCharClass.CharInClass(subsequent.Str![0], node.Str!):
                            case RegexNodeKind.End:
                            case RegexNodeKind.EndZ or RegexNodeKind.Eol when !RegexCharClass.CharInClass('\n', node.Str!):
                            case RegexNodeKind.Boundary when node.Str is RegexCharClass.WordClass or RegexCharClass.DigitClass:
                            case RegexNodeKind.NonBoundary when node.Str is RegexCharClass.NotWordClass or RegexCharClass.NotDigitClass:
                            case RegexNodeKind.ECMABoundary when node.Str is RegexCharClass.ECMAWordClass or RegexCharClass.ECMADigitClass:
                            case RegexNodeKind.NonECMABoundary when node.Str is RegexCharClass.NotECMAWordClass or RegexCharClass.NotDigitClass:
                                return true;

                            case RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic when subsequent.M == 0 && !RegexCharClass.CharInClass(subsequent.Ch, node.Str!):
                            case RegexNodeKind.Setlazy or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic when subsequent.M == 0 && !RegexCharClass.MayOverlap(node.Str!, subsequent.Str!):
                                // The loop can be made atomic based on this subsequent node, but we'll need to evaluate the next one as well.
                                break;

                            default:
                                return false;
                        }
                        break;

                    default:
                        return false;
                }

                // We only get here if the node could be made atomic based on subsequent but subsequent has a lower bound of zero
                // and thus we need to move subsequent to be the next node in sequence and loop around to try again.
                Debug.Assert(subsequent.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Notonelazy or
                             RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy);
                Debug.Assert(subsequent.M == 0);

                if (!allowSubsequentIteration)
                {
                    return false;
                }

                // To be conservative, we only walk up through a very limited set of constructs (even though we may have walked
                // down through more, like loops), looking for the next concatenation that we're not at the end of, at
                // which point subsequent becomes whatever node is next in that concatenation.
                while (true)
                {
                    RegexNode? parent = subsequent.Parent;
                    switch (parent?.Kind)
                    {
                        case RegexNodeKind.Atomic:
                        case RegexNodeKind.Alternate:
                        case RegexNodeKind.Capture:
                            subsequent = parent;
                            continue;

                        case RegexNodeKind.Concatenate:
                            var peers = (List<RegexNode>)parent.Children!;
                            int currentIndex = peers.IndexOf(subsequent);
                            Debug.Assert(currentIndex >= 0, "Node should have been in its parent's child list");
                            if (currentIndex + 1 == peers.Count)
                            {
                                subsequent = parent;
                                continue;
                            }
                            else
                            {
                                subsequent = peers[currentIndex + 1];
                                break;
                            }

                        case null:
                            // If we hit the root, we're at the end of the expression, at which point nothing could backtrack
                            // in and we can declare success.
                            return true;

                        default:
                            // Anything else, we don't know what to do, so we have to assume it could conflict with the loop.
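                            // e.g. walking up into a Loop or Lazyloop parent: another iteration of that outer loop
                            // could bring matching back to this position, so we conservatively report false.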
return false; } break; } } } /// <summary>Computes a min bound on the required length of any string that could possibly match.</summary> /// <returns>The min computed length. If the result is 0, there is no minimum we can enforce.</returns> /// <remarks> /// e.g. abc[def](ghijkl|mn) => 6 /// </remarks> public int ComputeMinLength() { if (!StackHelper.TryEnsureSufficientExecutionStack()) { // If we can't recur further, assume there's no minimum we can enforce. return 0; } switch (Kind) { case RegexNodeKind.One: case RegexNodeKind.Notone: case RegexNodeKind.Set: // Single character. return 1; case RegexNodeKind.Multi: // Every character in the string needs to match. return Str!.Length; case RegexNodeKind.Notonelazy: case RegexNodeKind.Notoneloop: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Onelazy: case RegexNodeKind.Oneloop: case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Setlazy: case RegexNodeKind.Setloop: case RegexNodeKind.Setloopatomic: // One character repeated at least M times. return M; case RegexNodeKind.Lazyloop: case RegexNodeKind.Loop: // A node graph repeated at least M times. return (int)Math.Min(int.MaxValue - 1, (long)M * Child(0).ComputeMinLength()); case RegexNodeKind.Alternate: // The minimum required length for any of the alternation's branches. { int childCount = ChildCount(); Debug.Assert(childCount >= 2); int min = Child(0).ComputeMinLength(); for (int i = 1; i < childCount && min > 0; i++) { min = Math.Min(min, Child(i).ComputeMinLength()); } return min; } case RegexNodeKind.BackreferenceConditional: // Minimum of its yes and no branches. The backreference doesn't add to the length. return Math.Min(Child(0).ComputeMinLength(), Child(1).ComputeMinLength()); case RegexNodeKind.ExpressionConditional: // Minimum of its yes and no branches. The condition is a zero-width assertion. return Math.Min(Child(1).ComputeMinLength(), Child(2).ComputeMinLength()); case RegexNodeKind.Concatenate: // The sum of all of the concatenation's children. { long sum = 0; int childCount = ChildCount(); for (int i = 0; i < childCount; i++) { sum += Child(i).ComputeMinLength(); } return (int)Math.Min(int.MaxValue - 1, sum); } case RegexNodeKind.Atomic: case RegexNodeKind.Capture: case RegexNodeKind.Group: // For groups, we just delegate to the sole child. Debug.Assert(ChildCount() == 1); return Child(0).ComputeMinLength(); case RegexNodeKind.Empty: case RegexNodeKind.Nothing: case RegexNodeKind.UpdateBumpalong: // Nothing to match. In the future, we could potentially use Nothing to say that the min length // is infinite, but that would require a different structure, as that would only apply if the // Nothing match is required in all cases (rather than, say, as one branch of an alternation). case RegexNodeKind.Beginning: case RegexNodeKind.Bol: case RegexNodeKind.Boundary: case RegexNodeKind.ECMABoundary: case RegexNodeKind.End: case RegexNodeKind.EndZ: case RegexNodeKind.Eol: case RegexNodeKind.NonBoundary: case RegexNodeKind.NonECMABoundary: case RegexNodeKind.Start: case RegexNodeKind.NegativeLookaround: case RegexNodeKind.PositiveLookaround: // Zero-width case RegexNodeKind.Backreference: // Requires matching data available only at run-time. In the future, we could choose to find // and follow the capture group this aligns with, while being careful not to end up in an // infinite cycle. 
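                    // e.g. for (\w\w)\1 we report a minimum length of 2 (from the capture alone), even though
                    // the backreference is known at match time to consume two more characters.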
return 0; default: Debug.Fail($"Unknown node: {Kind}"); goto case RegexNodeKind.Empty; } } /// <summary>Computes a maximum length of any string that could possibly match.</summary> /// <returns>The maximum length of any string that could possibly match, or null if the length may not always be the same.</returns> /// <remarks> /// e.g. abc[def](gh|ijklmnop) => 12 /// </remarks> public int? ComputeMaxLength() { if (!StackHelper.TryEnsureSufficientExecutionStack()) { // If we can't recur further, assume there's no minimum we can enforce. return null; } switch (Kind) { case RegexNodeKind.One: case RegexNodeKind.Notone: case RegexNodeKind.Set: // Single character. return 1; case RegexNodeKind.Multi: // Every character in the string needs to match. return Str!.Length; case RegexNodeKind.Notonelazy or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Setlazy or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic: // Return the max number of iterations if there's an upper bound, or null if it's infinite return N == int.MaxValue ? null : N; case RegexNodeKind.Loop or RegexNodeKind.Lazyloop: if (N != int.MaxValue) { // A node graph repeated a fixed number of times if (Child(0).ComputeMaxLength() is int childMaxLength) { long maxLength = (long)N * childMaxLength; if (maxLength < int.MaxValue) { return (int)maxLength; } } } return null; case RegexNodeKind.Alternate: // The maximum length of any child branch, as long as they all have one. { int childCount = ChildCount(); Debug.Assert(childCount >= 2); if (Child(0).ComputeMaxLength() is not int maxLength) { return null; } for (int i = 1; i < childCount; i++) { if (Child(i).ComputeMaxLength() is not int next) { return null; } maxLength = Math.Max(maxLength, next); } return maxLength; } case RegexNodeKind.BackreferenceConditional: case RegexNodeKind.ExpressionConditional: // The maximum length of either child branch, as long as they both have one.. The condition for an expression conditional is a zero-width assertion. { int i = Kind == RegexNodeKind.BackreferenceConditional ? 0 : 1; return Child(i).ComputeMaxLength() is int yes && Child(i + 1).ComputeMaxLength() is int no ? Math.Max(yes, no) : null; } case RegexNodeKind.Concatenate: // The sum of all of the concatenation's children's max lengths, as long as they all have one. { long sum = 0; int childCount = ChildCount(); for (int i = 0; i < childCount; i++) { if (Child(i).ComputeMaxLength() is not int length) { return null; } sum += length; } if (sum < int.MaxValue) { return (int)sum; } return null; } case RegexNodeKind.Atomic: case RegexNodeKind.Capture: // For groups, we just delegate to the sole child. Debug.Assert(ChildCount() == 1); return Child(0).ComputeMaxLength(); case RegexNodeKind.Empty: case RegexNodeKind.Nothing: case RegexNodeKind.UpdateBumpalong: case RegexNodeKind.Beginning: case RegexNodeKind.Bol: case RegexNodeKind.Boundary: case RegexNodeKind.ECMABoundary: case RegexNodeKind.End: case RegexNodeKind.EndZ: case RegexNodeKind.Eol: case RegexNodeKind.NonBoundary: case RegexNodeKind.NonECMABoundary: case RegexNodeKind.Start: case RegexNodeKind.PositiveLookaround: case RegexNodeKind.NegativeLookaround: // Zero-width return 0; case RegexNodeKind.Backreference: // Requires matching data available only at run-time. In the future, we could choose to find // and follow the capture group this aligns with, while being careful not to end up in an // infinite cycle. 
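                    // e.g. even for (a{1,3})\1, where the capture's own maximum is 3, the backreference's length
                    // is only known at run time, so we return null and the containing concatenation reports no max.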
return null; default: Debug.Fail($"Unknown node: {Kind}"); goto case RegexNodeKind.Empty; } } /// <summary> /// Determine whether the specified child node is the beginning of a sequence that can /// trivially have length checks combined in order to avoid bounds checks. /// </summary> /// <param name="childIndex">The starting index of the child to check.</param> /// <param name="requiredLength">The sum of all the fixed lengths for the nodes in the sequence.</param> /// <param name="exclusiveEnd">The index of the node just after the last one in the sequence.</param> /// <returns>true if more than one node can have their length checks combined; otherwise, false.</returns> /// <remarks> /// There are additional node types for which we can prove a fixed length, e.g. examining all branches /// of an alternation and returning true if all their lengths are equal. However, the primary purpose /// of this method is to avoid bounds checks by consolidating length checks that guard accesses to /// strings/spans for which the JIT can see a fixed index within bounds, and alternations employ /// patterns that defeat that (e.g. reassigning the span in question). As such, the implementation /// remains focused on only a core subset of nodes that are a) likely to be used in concatenations and /// b) employ simple patterns of checks. /// </remarks> public bool TryGetJoinableLengthCheckChildRange(int childIndex, out int requiredLength, out int exclusiveEnd) { static bool CanJoinLengthCheck(RegexNode node) => node.Kind switch { RegexNodeKind.One or RegexNodeKind.Notone or RegexNodeKind.Set => true, RegexNodeKind.Multi => true, RegexNodeKind.Oneloop or RegexNodeKind.Onelazy or RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notonelazy or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setlazy or RegexNodeKind.Setloopatomic when node.M == node.N => true, _ => false, }; RegexNode child = Child(childIndex); if (CanJoinLengthCheck(child)) { requiredLength = child.ComputeMinLength(); int childCount = ChildCount(); for (exclusiveEnd = childIndex + 1; exclusiveEnd < childCount; exclusiveEnd++) { child = Child(exclusiveEnd); if (!CanJoinLengthCheck(child)) { break; } requiredLength += child.ComputeMinLength(); } if (exclusiveEnd - childIndex > 1) { return true; } } requiredLength = 0; exclusiveEnd = 0; return false; } public RegexNode MakeQuantifier(bool lazy, int min, int max) { // Certain cases of repeaters (min == max) can be handled specially if (min == max) { switch (max) { case 0: // The node is repeated 0 times, so it's actually empty. return new RegexNode(RegexNodeKind.Empty, Options); case 1: // The node is repeated 1 time, so it's not actually a repeater. return this; case <= MultiVsRepeaterLimit when Kind == RegexNodeKind.One: // The same character is repeated a fixed number of times, so it's actually a multi. // While this could remain a repeater, multis are more readily optimized later in // processing. The counts used here in real-world expressions are invariably small (e.g. 4), // but we set an upper bound just to avoid creating really large strings. Debug.Assert(max >= 2); Kind = RegexNodeKind.Multi; Str = new string(Ch, max); Ch = '\0'; return this; } } switch (Kind) { case RegexNodeKind.One: case RegexNodeKind.Notone: case RegexNodeKind.Set: MakeRep(lazy ? RegexNodeKind.Onelazy : RegexNodeKind.Oneloop, min, max); return this; default: var result = new RegexNode(lazy ? 
RegexNodeKind.Lazyloop : RegexNodeKind.Loop, Options, min, max); result.AddChild(this); return result; } } public void AddChild(RegexNode newChild) { newChild.Parent = this; // so that the child can see its parent while being reduced newChild = newChild.Reduce(); newChild.Parent = this; // in case Reduce returns a different node that needs to be reparented if (Children is null) { Children = newChild; } else if (Children is RegexNode currentChild) { Children = new List<RegexNode>() { currentChild, newChild }; } else { ((List<RegexNode>)Children).Add(newChild); } } public void InsertChild(int index, RegexNode newChild) { Debug.Assert(Children is List<RegexNode>); newChild.Parent = this; // so that the child can see its parent while being reduced newChild = newChild.Reduce(); newChild.Parent = this; // in case Reduce returns a different node that needs to be reparented ((List<RegexNode>)Children).Insert(index, newChild); } public void ReplaceChild(int index, RegexNode newChild) { Debug.Assert(Children != null); Debug.Assert(index < ChildCount()); newChild.Parent = this; // so that the child can see its parent while being reduced newChild = newChild.Reduce(); newChild.Parent = this; // in case Reduce returns a different node that needs to be reparented if (Children is RegexNode) { Children = newChild; } else { ((List<RegexNode>)Children)[index] = newChild; } } public RegexNode Child(int i) => Children is RegexNode child ? child : ((List<RegexNode>)Children!)[i]; public int ChildCount() { if (Children is null) { return 0; } if (Children is List<RegexNode> children) { return children.Count; } Debug.Assert(Children is RegexNode); return 1; } // Determines whether the node supports a compilation / code generation strategy based on walking the node tree. // Also returns a human-readable string to explain the reason (it will be emitted by the source generator, hence // there's no need to localize). internal bool SupportsCompilation([NotNullWhen(false)] out string? reason) { if (!StackHelper.TryEnsureSufficientExecutionStack()) { reason = "run-time limits were exceeded"; return false; } // NonBacktracking isn't supported, nor RightToLeft. The latter applies to both the top-level // options as well as when used to specify positive and negative lookbehinds. if ((Options & RegexOptions.NonBacktracking) != 0) { reason = "RegexOptions.NonBacktracking was specified"; return false; } if ((Options & RegexOptions.RightToLeft) != 0) { reason = "RegexOptions.RightToLeft or a positive/negative lookbehind was used"; return false; } int childCount = ChildCount(); for (int i = 0; i < childCount; i++) { // The node isn't supported if any of its children aren't supported. if (!Child(i).SupportsCompilation(out reason)) { return false; } } // Supported. 
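            // e.g. a pattern compiled with RegexOptions.RightToLeft, or one containing a lookbehind such as
            // (?<=a)b, is reported as unsupported above, while lookaheads and atomic groups pass through fine.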
reason = null; return true; } /// <summary>Gets whether the node is a Set/Setloop/Setloopatomic/Setlazy node.</summary> public bool IsSetFamily => Kind is RegexNodeKind.Set or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy; /// <summary>Gets whether the node is a One/Oneloop/Oneloopatomic/Onelazy node.</summary> public bool IsOneFamily => Kind is RegexNodeKind.One or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy; /// <summary>Gets whether the node is a Notone/Notoneloop/Notoneloopatomic/Notonelazy node.</summary> public bool IsNotoneFamily => Kind is RegexNodeKind.Notone or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Notonelazy; /// <summary>Gets whether this node is contained inside of a loop.</summary> public bool IsInLoop() { for (RegexNode? parent = Parent; parent is not null; parent = parent.Parent) { if (parent.Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop) { return true; } } return false; } #if DEBUG [ExcludeFromCodeCoverage] public override string ToString() { RegexNode? curNode = this; int curChild = 0; var sb = new StringBuilder().AppendLine(curNode.Describe()); var stack = new List<int>(); while (true) { if (curChild < curNode!.ChildCount()) { stack.Add(curChild + 1); curNode = curNode.Child(curChild); curChild = 0; sb.Append(new string(' ', stack.Count * 2)).Append(curNode.Describe()).AppendLine(); } else { if (stack.Count == 0) { break; } curChild = stack[stack.Count - 1]; stack.RemoveAt(stack.Count - 1); curNode = curNode.Parent; } } return sb.ToString(); } [ExcludeFromCodeCoverage] private string Describe() { var sb = new StringBuilder(Kind.ToString()); if ((Options & RegexOptions.ExplicitCapture) != 0) sb.Append("-C"); if ((Options & RegexOptions.IgnoreCase) != 0) sb.Append("-I"); if ((Options & RegexOptions.RightToLeft) != 0) sb.Append("-L"); if ((Options & RegexOptions.Multiline) != 0) sb.Append("-M"); if ((Options & RegexOptions.Singleline) != 0) sb.Append("-S"); if ((Options & RegexOptions.IgnorePatternWhitespace) != 0) sb.Append("-X"); if ((Options & RegexOptions.ECMAScript) != 0) sb.Append("-E"); switch (Kind) { case RegexNodeKind.Oneloop: case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notoneloop: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Onelazy: case RegexNodeKind.Notonelazy: case RegexNodeKind.One: case RegexNodeKind.Notone: sb.Append(" '").Append(RegexCharClass.DescribeChar(Ch)).Append('\''); break; case RegexNodeKind.Capture: sb.Append(' ').Append($"index = {M}"); if (N != -1) { sb.Append($", unindex = {N}"); } break; case RegexNodeKind.Backreference: case RegexNodeKind.BackreferenceConditional: sb.Append(' ').Append($"index = {M}"); break; case RegexNodeKind.Multi: sb.Append(" \"").Append(Str).Append('"'); break; case RegexNodeKind.Set: case RegexNodeKind.Setloop: case RegexNodeKind.Setloopatomic: case RegexNodeKind.Setlazy: sb.Append(' ').Append(RegexCharClass.DescribeSet(Str!)); break; } switch (Kind) { case RegexNodeKind.Oneloop: case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notoneloop: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Onelazy: case RegexNodeKind.Notonelazy: case RegexNodeKind.Setloop: case RegexNodeKind.Setloopatomic: case RegexNodeKind.Setlazy: case RegexNodeKind.Loop: case RegexNodeKind.Lazyloop: sb.Append( (M == 0 && N == int.MaxValue) ? "*" : (M == 0 && N == 1) ? "?" : (M == 1 && N == int.MaxValue) ? "+" : (N == int.MaxValue) ? $"{{{M}, *}}" : (N == M) ? 
$"{{{M}}}" : $"{{{M}, {N}}}"); break; } return sb.ToString(); } #endif } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.Threading; namespace System.Text.RegularExpressions { /// <summary>Represents a regex subexpression.</summary> internal sealed class RegexNode { /// <summary>empty bit from the node's options to store data on whether a node contains captures</summary> internal const RegexOptions HasCapturesFlag = (RegexOptions)(1 << 31); /// <summary>Arbitrary number of repetitions of the same character when we'd prefer to represent that as a repeater of that character rather than a string.</summary> internal const int MultiVsRepeaterLimit = 64; /// <summary>The node's children.</summary> /// <remarks>null if no children, a <see cref="RegexNode"/> if one child, or a <see cref="List{RegexNode}"/> if multiple children.</remarks> private object? Children; /// <summary>The kind of expression represented by this node.</summary> public RegexNodeKind Kind { get; private set; } /// <summary>A string associated with the node.</summary> /// <remarks>For a <see cref="RegexNodeKind.Multi"/>, this is the string from the expression. For an <see cref="IsSetFamily"/> node, this is the character class string from <see cref="RegexCharClass"/>.</remarks> public string? Str { get; private set; } /// <summary>The character associated with the node.</summary> /// <remarks>For a <see cref="IsOneFamily"/> or <see cref="IsNotoneFamily"/> node, the character from the expression.</remarks> public char Ch { get; private set; } /// <summary>The minimum number of iterations for a loop, or the capture group number for a capture or backreference.</summary> /// <remarks>No minimum is represented by 0. No capture group is represented by -1.</remarks> public int M { get; private set; } /// <summary>The maximum number of iterations for a loop, or the uncapture group number for a balancing group.</summary> /// <remarks>No upper bound is represented by <see cref="int.MaxValue"/>. No capture group is represented by -1.</remarks> public int N { get; private set; } /// <summary>The options associated with the node.</summary> public RegexOptions Options; /// <summary>The node's parent node in the tree.</summary> /// <remarks> /// During parsing, top-level nodes are also stacked onto a parse stack (a stack of trees) using <see cref="Parent"/>. /// After parsing, <see cref="Parent"/> is the node in the tree that has this node as or in <see cref="Children"/>. /// </remarks> public RegexNode? Parent; public RegexNode(RegexNodeKind kind, RegexOptions options) { Kind = kind; Options = options; } public RegexNode(RegexNodeKind kind, RegexOptions options, char ch) { Kind = kind; Options = options; Ch = ch; } public RegexNode(RegexNodeKind kind, RegexOptions options, string str) { Kind = kind; Options = options; Str = str; } public RegexNode(RegexNodeKind kind, RegexOptions options, int m) { Kind = kind; Options = options; M = m; } public RegexNode(RegexNodeKind kind, RegexOptions options, int m, int n) { Kind = kind; Options = options; M = m; N = n; } /// <summary>Creates a RegexNode representing a single character.</summary> /// <param name="ch">The character.</param> /// <param name="options">The node's options.</param> /// <param name="culture">The culture to use to perform any required transformations.</param> /// <returns>The created RegexNode. 
This might be a RegexNode.One or a RegexNode.Set.</returns> public static RegexNode CreateOneWithCaseConversion(char ch, RegexOptions options, CultureInfo? culture) { // If the options specify case-insensitivity, we try to create a node that fully encapsulates that. if ((options & RegexOptions.IgnoreCase) != 0) { Debug.Assert(culture is not null); // If the character is part of a Unicode category that doesn't participate in case conversion, // we can simply strip out the IgnoreCase option and make the node case-sensitive. if (!RegexCharClass.ParticipatesInCaseConversion(ch)) { return new RegexNode(RegexNodeKind.One, options & ~RegexOptions.IgnoreCase, ch); } // Create a set for the character, trying to include all case-insensitive equivalent characters. // If it's successful in doing so, resultIsCaseInsensitive will be false and we can strip // out RegexOptions.IgnoreCase as part of creating the set. string stringSet = RegexCharClass.OneToStringClass(ch, culture, out bool resultIsCaseInsensitive); if (!resultIsCaseInsensitive) { return new RegexNode(RegexNodeKind.Set, options & ~RegexOptions.IgnoreCase, stringSet); } // Otherwise, until we can get rid of ToLower usage at match time entirely (https://github.com/dotnet/runtime/issues/61048), // lowercase the character and proceed to create an IgnoreCase One node. ch = culture.TextInfo.ToLower(ch); } // Create a One node for the character. return new RegexNode(RegexNodeKind.One, options, ch); } /// <summary>Reverses all children of a concatenation when in RightToLeft mode.</summary> public RegexNode ReverseConcatenationIfRightToLeft() { if ((Options & RegexOptions.RightToLeft) != 0 && Kind == RegexNodeKind.Concatenate && ChildCount() > 1) { ((List<RegexNode>)Children!).Reverse(); } return this; } /// <summary> /// Pass type as OneLazy or OneLoop /// </summary> private void MakeRep(RegexNodeKind kind, int min, int max) { Kind += kind - RegexNodeKind.One; M = min; N = max; } private void MakeLoopAtomic() { switch (Kind) { case RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop: // For loops, we simply change the Type to the atomic variant. // Atomic greedy loops should consume as many values as they can. Kind += RegexNodeKind.Oneloopatomic - RegexNodeKind.Oneloop; break; case RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy: // For lazy, we not only change the Type, we also lower the max number of iterations // to the minimum number of iterations, creating a repeater, as they should end up // matching as little as possible. Kind += RegexNodeKind.Oneloopatomic - RegexNodeKind.Onelazy; N = M; if (N == 0) { // If moving the max to be the same as the min dropped it to 0, there's no // work to be done for this node, and we can make it Empty. Kind = RegexNodeKind.Empty; Str = null; Ch = '\0'; } else if (Kind == RegexNodeKind.Oneloopatomic && N is >= 2 and <= MultiVsRepeaterLimit) { // If this is now a One repeater with a small enough length, // make it a Multi instead, as they're better optimized down the line. 
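                        // e.g. a lazy a{3,}? being made atomic first collapses to exactly three iterations and is
                        // then stored as the Multi "aaa" rather than as a loop with M == N == 3.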
Kind = RegexNodeKind.Multi; Str = new string(Ch, N); Ch = '\0'; M = N = 0; } break; default: Debug.Fail($"Unexpected type: {Kind}"); break; } } #if DEBUG /// <summary>Validate invariants the rest of the implementation relies on for processing fully-built trees.</summary> [Conditional("DEBUG")] private void ValidateFinalTreeInvariants() { Debug.Assert(Kind == RegexNodeKind.Capture, "Every generated tree should begin with a capture node"); var toExamine = new Stack<RegexNode>(); toExamine.Push(this); while (toExamine.Count > 0) { RegexNode node = toExamine.Pop(); // Add all children to be examined int childCount = node.ChildCount(); for (int i = 0; i < childCount; i++) { RegexNode child = node.Child(i); Debug.Assert(child.Parent == node, $"{child.Describe()} missing reference to parent {node.Describe()}"); toExamine.Push(child); } // Validate that we never see certain node types. Debug.Assert(Kind != RegexNodeKind.Group, "All Group nodes should have been removed."); // Validate node types and expected child counts. switch (node.Kind) { case RegexNodeKind.Group: Debug.Fail("All Group nodes should have been removed."); break; case RegexNodeKind.Beginning: case RegexNodeKind.Bol: case RegexNodeKind.Boundary: case RegexNodeKind.ECMABoundary: case RegexNodeKind.Empty: case RegexNodeKind.End: case RegexNodeKind.EndZ: case RegexNodeKind.Eol: case RegexNodeKind.Multi: case RegexNodeKind.NonBoundary: case RegexNodeKind.NonECMABoundary: case RegexNodeKind.Nothing: case RegexNodeKind.Notone: case RegexNodeKind.Notonelazy: case RegexNodeKind.Notoneloop: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.One: case RegexNodeKind.Onelazy: case RegexNodeKind.Oneloop: case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Backreference: case RegexNodeKind.Set: case RegexNodeKind.Setlazy: case RegexNodeKind.Setloop: case RegexNodeKind.Setloopatomic: case RegexNodeKind.Start: case RegexNodeKind.UpdateBumpalong: Debug.Assert(childCount == 0, $"Expected zero children for {node.Kind}, got {childCount}."); break; case RegexNodeKind.Atomic: case RegexNodeKind.Capture: case RegexNodeKind.Lazyloop: case RegexNodeKind.Loop: case RegexNodeKind.NegativeLookaround: case RegexNodeKind.PositiveLookaround: Debug.Assert(childCount == 1, $"Expected one and only one child for {node.Kind}, got {childCount}."); break; case RegexNodeKind.BackreferenceConditional: Debug.Assert(childCount == 2, $"Expected two children for {node.Kind}, got {childCount}"); break; case RegexNodeKind.ExpressionConditional: Debug.Assert(childCount == 3, $"Expected three children for {node.Kind}, got {childCount}"); break; case RegexNodeKind.Concatenate: case RegexNodeKind.Alternate: Debug.Assert(childCount >= 2, $"Expected at least two children for {node.Kind}, got {childCount}."); break; default: Debug.Fail($"Unexpected node type: {node.Kind}"); break; } // Validate node configuration. 
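                // e.g. a Multi must carry a string of at least two characters (a single character should have
                // been stored as a One), and Set-family nodes must always have a non-empty class string.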
switch (node.Kind) { case RegexNodeKind.Multi: Debug.Assert(node.Str is not null, "Expect non-null multi string"); Debug.Assert(node.Str.Length >= 2, $"Expected {node.Str} to be at least two characters"); break; case RegexNodeKind.Set: case RegexNodeKind.Setloop: case RegexNodeKind.Setloopatomic: case RegexNodeKind.Setlazy: Debug.Assert(!string.IsNullOrEmpty(node.Str), $"Expected non-null, non-empty string for {node.Kind}."); break; default: Debug.Assert(node.Str is null, $"Expected null string for {node.Kind}, got \"{node.Str}\"."); break; } } } #endif /// <summary>Performs additional optimizations on an entire tree prior to being used.</summary> /// <remarks> /// Some optimizations are performed by the parser while parsing, and others are performed /// as nodes are being added to the tree. The optimizations here expect the tree to be fully /// formed, as they inspect relationships between nodes that may not have been in place as /// individual nodes were being processed/added to the tree. /// </remarks> internal RegexNode FinalOptimize() { RegexNode rootNode = this; Debug.Assert(rootNode.Kind == RegexNodeKind.Capture); Debug.Assert(rootNode.Parent is null); Debug.Assert(rootNode.ChildCount() == 1); // Only apply optimization when LTR to avoid needing additional code for the much rarer RTL case. // Also only apply these optimizations when not using NonBacktracking, as these optimizations are // all about avoiding things that are impactful for the backtracking engines but nops for non-backtracking. if ((Options & (RegexOptions.RightToLeft | RegexOptions.NonBacktracking)) == 0) { // Optimization: eliminate backtracking for loops. // For any single-character loop (Oneloop, Notoneloop, Setloop), see if we can automatically convert // that into its atomic counterpart (Oneloopatomic, Notoneloopatomic, Setloopatomic) based on what // comes after it in the expression tree. rootNode.FindAndMakeLoopsAtomic(); // Optimization: backtracking removal at expression end. // If we find backtracking construct at the end of the regex, we can instead make it non-backtracking, // since nothing would ever backtrack into it anyway. Doing this then makes the construct available // to implementations that don't support backtracking. rootNode.EliminateEndingBacktracking(); // Optimization: unnecessary re-processing of starting loops. // If an expression is guaranteed to begin with a single-character unbounded loop that isn't part of an alternation (in which case it // wouldn't be guaranteed to be at the beginning) or a capture (in which case a back reference could be influenced by its length), then we // can update the tree with a temporary node to indicate that the implementation should use that node's ending position in the input text // as the next starting position at which to start the next match. This avoids redoing matches we've already performed, e.g. matching // "\[email protected]" against "is this a valid [email protected]", the \w+ will initially match the "is" and then will fail to match the "@". // Rather than bumping the scan loop by 1 and trying again to match at the "s", we can instead start at the " ". 
For functional correctness // we can only consider unbounded loops, as to be able to start at the end of the loop we need the loop to have consumed all possible matches; // otherwise, you could end up with a pattern like "a{1,3}b" matching against "aaaabc", which should match, but if we pre-emptively stop consuming // after the first three a's and re-start from that position, we'll end up failing the match even though it should have succeeded. We can also // apply this optimization to non-atomic loops: even though backtracking could be necessary, such backtracking would be handled within the processing // of a single starting position. Lazy loops similarly benefit, as a failed match will result in exploring the exact same search space as with // a greedy loop, just in the opposite order (and a successful match will overwrite the bumpalong position); we need to avoid atomic lazy loops, // however, as they will only end up as a repeater for the minimum length and thus will effectively end up with a non-infinite upper bound, which // we've already outlined is problematic. { RegexNode node = rootNode.Child(0); // skip implicit root capture node bool atomicByAncestry = true; // the root is implicitly atomic because nothing comes after it (same for the implicit root capture) while (true) { switch (node.Kind) { case RegexNodeKind.Atomic: node = node.Child(0); continue; case RegexNodeKind.Concatenate: atomicByAncestry = false; node = node.Child(0); continue; case RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic when node.N == int.MaxValue: case RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy when node.N == int.MaxValue && !atomicByAncestry: if (node.Parent is { Kind: RegexNodeKind.Concatenate } parent) { parent.InsertChild(1, new RegexNode(RegexNodeKind.UpdateBumpalong, node.Options)); } break; } break; } } } // Done optimizing. Return the final tree. #if DEBUG rootNode.ValidateFinalTreeInvariants(); #endif return rootNode; } /// <summary>Converts nodes at the end of the node tree to be atomic.</summary> /// <remarks> /// The correctness of this optimization depends on nothing being able to backtrack into /// the provided node. That means it must be at the root of the overall expression, or /// it must be an Atomic node that nothing will backtrack into by the very nature of Atomic. /// </remarks> private void EliminateEndingBacktracking() { if (!StackHelper.TryEnsureSufficientExecutionStack() || (Options & (RegexOptions.RightToLeft | RegexOptions.NonBacktracking)) != 0) { // If we can't recur further, just stop optimizing. // We haven't done the work to validate this is correct for RTL. // And NonBacktracking doesn't support atomic groups and doesn't have backtracking to be eliminated. return; } // Walk the tree starting from the current node. RegexNode node = this; while (true) { switch (node.Kind) { // {One/Notone/Set}loops can be upgraded to {One/Notone/Set}loopatomic nodes, e.g. [abc]* => (?>[abc]*). // And {One/Notone/Set}lazys can similarly be upgraded to be atomic, which really makes them into repeaters // or even empty nodes. case RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop: case RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy: node.MakeLoopAtomic(); break; // Just because a particular node is atomic doesn't mean all its descendants are. // Process them as well. 
Lookarounds are implicitly atomic. case RegexNodeKind.Atomic: case RegexNodeKind.PositiveLookaround: case RegexNodeKind.NegativeLookaround: node = node.Child(0); continue; // For Capture and Concatenate, we just recur into their last child (only child in the case // of Capture). However, if the child is an alternation or loop, we can also make the // node itself atomic by wrapping it in an Atomic node. Since we later check to see whether a // node is atomic based on its parent or grandparent, we don't bother wrapping such a node in // an Atomic one if its grandparent is already Atomic. // e.g. [xyz](?:abc|def) => [xyz](?>abc|def) case RegexNodeKind.Capture: case RegexNodeKind.Concatenate: RegexNode existingChild = node.Child(node.ChildCount() - 1); if ((existingChild.Kind is RegexNodeKind.Alternate or RegexNodeKind.BackreferenceConditional or RegexNodeKind.ExpressionConditional or RegexNodeKind.Loop or RegexNodeKind.Lazyloop) && (node.Parent is null || node.Parent.Kind != RegexNodeKind.Atomic)) // validate grandparent isn't atomic { var atomic = new RegexNode(RegexNodeKind.Atomic, existingChild.Options); atomic.AddChild(existingChild); node.ReplaceChild(node.ChildCount() - 1, atomic); } node = existingChild; continue; // For alternate, we can recur into each branch separately. We use this iteration for the first branch. // Conditionals are just like alternations in this regard. // e.g. abc*|def* => ab(?>c*)|de(?>f*) case RegexNodeKind.Alternate: case RegexNodeKind.BackreferenceConditional: case RegexNodeKind.ExpressionConditional: { int branches = node.ChildCount(); for (int i = 1; i < branches; i++) { node.Child(i).EliminateEndingBacktracking(); } if (node.Kind != RegexNodeKind.ExpressionConditional) // ReduceExpressionConditional will have already applied ending backtracking removal { node = node.Child(0); continue; } } break; // For {Lazy}Loop, we search to see if there's a viable last expression, and iff there // is we recur into processing it. Also, as with the single-char lazy loops, LazyLoop // can have its max iteration count dropped to its min iteration count, as there's no // reason for it to match more than the minimal at the end; that in turn makes it a // repeater, which results in better code generation. // e.g. (?:abc*)* => (?:ab(?>c*))* // e.g. (abc*?)+? => (ab){1} case RegexNodeKind.Lazyloop: node.N = node.M; goto case RegexNodeKind.Loop; case RegexNodeKind.Loop: { if (node.N == 1) { // If the loop has a max iteration count of 1 (e.g. it's an optional node), // there's no possibility for conflict between multiple iterations, so // we can process it. node = node.Child(0); continue; } RegexNode? loopDescendent = node.FindLastExpressionInLoopForAutoAtomic(); if (loopDescendent != null) { node = loopDescendent; continue; // loop around to process node } } break; } break; } } /// <summary> /// Removes redundant nodes from the subtree, and returns an optimized subtree. /// </summary> internal RegexNode Reduce() { // TODO: https://github.com/dotnet/runtime/issues/61048 // As part of overhauling IgnoreCase handling, the parser shouldn't produce any nodes other than Backreference // that ever have IgnoreCase set on them. For now, though, remove IgnoreCase from any nodes for which it // has no behavioral effect. 
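// e.g. in (?i)(abc)\b, only the Multi "abc" needs IgnoreCase to affect matching; the Capture wrapper
// and the \b Boundary behave identically with or without it, so the option is cleared from such nodes
// below and kept only on the character-matching kinds.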
switch (Kind) { default: // No effect Options &= ~RegexOptions.IgnoreCase; break; case RegexNodeKind.One or RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notone or RegexNodeKind.Notonelazy or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Set or RegexNodeKind.Setlazy or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic: case RegexNodeKind.Multi: case RegexNodeKind.Backreference: // Still meaningful break; } return Kind switch { RegexNodeKind.Alternate => ReduceAlternation(), RegexNodeKind.Atomic => ReduceAtomic(), RegexNodeKind.Concatenate => ReduceConcatenation(), RegexNodeKind.Group => ReduceGroup(), RegexNodeKind.Loop or RegexNodeKind.Lazyloop => ReduceLoops(), RegexNodeKind.PositiveLookaround or RegexNodeKind.NegativeLookaround => ReduceLookaround(), RegexNodeKind.Set or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy => ReduceSet(), RegexNodeKind.ExpressionConditional => ReduceExpressionConditional(), RegexNodeKind.BackreferenceConditional => ReduceBackreferenceConditional(), _ => this, }; } /// <summary>Remove an unnecessary Concatenation or Alternation node</summary> /// <remarks> /// Simple optimization for a concatenation or alternation: /// - if the node has only one child, use it instead /// - if the node has zero children, turn it into an empty with Nothing for an alternation or Empty for a concatenation /// </remarks> private RegexNode ReplaceNodeIfUnnecessary() { Debug.Assert(Kind is RegexNodeKind.Alternate or RegexNodeKind.Concatenate); return ChildCount() switch { 0 => new RegexNode(Kind == RegexNodeKind.Alternate ? RegexNodeKind.Nothing : RegexNodeKind.Empty, Options), 1 => Child(0), _ => this, }; } /// <summary>Remove all non-capturing groups.</summary> /// <remark> /// Simple optimization: once parsed into a tree, non-capturing groups /// serve no function, so strip them out. /// e.g. (?:(?:(?:abc))) => abc /// </remark> private RegexNode ReduceGroup() { Debug.Assert(Kind == RegexNodeKind.Group); RegexNode u = this; while (u.Kind == RegexNodeKind.Group) { Debug.Assert(u.ChildCount() == 1); u = u.Child(0); } return u; } /// <summary> /// Remove unnecessary atomic nodes, and make appropriate descendents of the atomic node themselves atomic. /// </summary> /// <remarks> /// e.g. (?>(?>(?>a*))) => (?>a*) /// e.g. (?>(abc*)*) => (?>(abc(?>c*))*) /// </remarks> private RegexNode ReduceAtomic() { // RegexOptions.NonBacktracking doesn't support atomic groups, so when that option // is set we don't want to create atomic groups where they weren't explicitly authored. if ((Options & RegexOptions.NonBacktracking) != 0) { return this; } Debug.Assert(Kind == RegexNodeKind.Atomic); Debug.Assert(ChildCount() == 1); RegexNode atomic = this; RegexNode child = Child(0); while (child.Kind == RegexNodeKind.Atomic) { atomic = child; child = atomic.Child(0); } switch (child.Kind) { // If the child is empty/nothing, there's nothing to be made atomic so the Atomic // node can simply be removed. case RegexNodeKind.Empty: case RegexNodeKind.Nothing: return child; // If the child is already atomic, we can just remove the atomic node. case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Setloopatomic: return child; // If an atomic subexpression contains only a {one/notone/set}{loop/lazy}, // change it to be an {one/notone/set}loopatomic and remove the atomic node. 
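// e.g. (?>a*) reduces to a single Oneloopatomic node, and (?>a*?) ends up as a repeater of the lazy
// loop's minimum (zero here), since an atomic lazy loop can never be pushed to match more than that.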
case RegexNodeKind.Oneloop: case RegexNodeKind.Notoneloop: case RegexNodeKind.Setloop: case RegexNodeKind.Onelazy: case RegexNodeKind.Notonelazy: case RegexNodeKind.Setlazy: child.MakeLoopAtomic(); return child; // Alternations have a variety of possible optimizations that can be applied // iff they're atomic. case RegexNodeKind.Alternate: if ((Options & RegexOptions.RightToLeft) == 0) { List<RegexNode>? branches = child.Children as List<RegexNode>; Debug.Assert(branches is not null && branches.Count != 0); // If an alternation is atomic and its first branch is Empty, the whole thing // is a nop, as Empty will match everything trivially, and no backtracking // into the node will be performed, making the remaining branches irrelevant. if (branches[0].Kind == RegexNodeKind.Empty) { return new RegexNode(RegexNodeKind.Empty, child.Options); } // Similarly, we can trim off any branches after an Empty, as they'll never be used. // An Empty will match anything, and thus branches after that would only be used // if we backtracked into it and advanced passed the Empty after trying the Empty... // but if the alternation is atomic, such backtracking won't happen. for (int i = 1; i < branches.Count - 1; i++) { if (branches[i].Kind == RegexNodeKind.Empty) { branches.RemoveRange(i + 1, branches.Count - (i + 1)); break; } } // If an alternation is atomic, we won't ever backtrack back into it, which // means order matters but not repetition. With backtracking, it would be incorrect // to convert an expression like "hi|there|hello" into "hi|hello|there", as doing // so could then change the order of results if we matched "hi" and then failed // based on what came after it, and both "hello" and "there" could be successful // with what came later. But without backtracking, we can reorder "hi|there|hello" // to instead be "hi|hello|there", as "hello" and "there" can't match the same text, // and once this atomic alternation has matched, we won't try another branch. This // reordering is valuable as it then enables further optimizations, e.g. // "hi|there|hello" => "hi|hello|there" => "h(?:i|ello)|there", which means we only // need to check the 'h' once in case it's not an 'h', and it's easier to employ different // code gen that, for example, switches on first character of the branches, enabling faster // choice of branch without always having to walk through each. bool reordered = false; for (int start = 0; start < branches.Count; start++) { // Get the node that may start our range. If it's a one, multi, or concat of those, proceed. RegexNode startNode = branches[start]; if (startNode.FindBranchOneOrMultiStart() is null) { continue; } // Find the contiguous range of nodes from this point that are similarly one, multi, or concat of those. int endExclusive = start + 1; while (endExclusive < branches.Count && branches[endExclusive].FindBranchOneOrMultiStart() is not null) { endExclusive++; } // If there's at least 3, there may be something to reorder (we won't reorder anything // before the starting position, and so only 2 items is considered ordered). if (endExclusive - start >= 3) { int compare = start; while (compare < endExclusive) { // Get the starting character char c = branches[compare].FindBranchOneOrMultiStart()!.FirstCharOfOneOrMulti(); // Move compare to point to the last branch that has the same starting value. 
while (compare < endExclusive && branches[compare].FindBranchOneOrMultiStart()!.FirstCharOfOneOrMulti() == c) { compare++; } // Compare now points to the first node that doesn't match the starting node. // If we've walked off our range, there's nothing left to reorder. if (compare < endExclusive) { // There may be something to reorder. See if there are any other nodes that begin with the same character. for (int next = compare + 1; next < endExclusive; next++) { RegexNode nextChild = branches[next]; if (nextChild.FindBranchOneOrMultiStart()!.FirstCharOfOneOrMulti() == c) { branches.RemoveAt(next); branches.Insert(compare++, nextChild); reordered = true; } } } } } // Move to the end of the range we've now explored. endExclusive is not a viable // starting position either, and the start++ for the loop will thus take us to // the next potential place to start a range. start = endExclusive; } // If anything was reordered, there may be new optimization opportunities inside // of the alternation, so reduce it again. if (reordered) { atomic.ReplaceChild(0, child); child = atomic.Child(0); } } goto default; // For everything else, try to reduce ending backtracking of the last contained expression. default: child.EliminateEndingBacktracking(); return atomic; } } /// <summary>Combine nested loops where applicable.</summary> /// <remarks> /// Nested repeaters just get multiplied with each other if they're not too lumpy. /// Other optimizations may have also resulted in {Lazy}loops directly containing /// sets, ones, and notones, in which case they can be transformed into the corresponding /// individual looping constructs. /// </remarks> private RegexNode ReduceLoops() { Debug.Assert(Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop); RegexNode u = this; RegexNodeKind kind = Kind; int min = M; int max = N; while (u.ChildCount() > 0) { RegexNode child = u.Child(0); // multiply reps of the same type only if (child.Kind != kind) { bool valid = false; if (kind == RegexNodeKind.Loop) { switch (child.Kind) { case RegexNodeKind.Oneloop: case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notoneloop: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Setloop: case RegexNodeKind.Setloopatomic: valid = true; break; } } else // type == Lazyloop { switch (child.Kind) { case RegexNodeKind.Onelazy: case RegexNodeKind.Notonelazy: case RegexNodeKind.Setlazy: valid = true; break; } } if (!valid) { break; } } // child can be too lumpy to blur, e.g., (a {100,105}) {3} or (a {2,})? // [but things like (a {2,})+ are not too lumpy...] if (u.M == 0 && child.M > 1 || child.N < child.M * 2) { break; } u = child; if (u.M > 0) { u.M = min = ((int.MaxValue - 1) / u.M < min) ? int.MaxValue : u.M * min; } if (u.N > 0) { u.N = max = ((int.MaxValue - 1) / u.N < max) ? int.MaxValue : u.N * max; } } if (min == int.MaxValue) { return new RegexNode(RegexNodeKind.Nothing, Options); } // If the Loop or Lazyloop now only has one child node and its a Set, One, or Notone, // reduce to just Setloop/lazy, Oneloop/lazy, or Notoneloop/lazy. The parser will // generally have only produced the latter, but other reductions could have exposed // this. if (u.ChildCount() == 1) { RegexNode child = u.Child(0); switch (child.Kind) { case RegexNodeKind.One: case RegexNodeKind.Notone: case RegexNodeKind.Set: child.MakeRep(u.Kind == RegexNodeKind.Lazyloop ? 
RegexNodeKind.Onelazy : RegexNodeKind.Oneloop, u.M, u.N); u = child; break; } } return u; } /// <summary> /// Reduces set-related nodes to simpler one-related and notone-related nodes, where applicable. /// </summary> /// <remarks> /// e.g. /// [a] => a /// [a]* => a* /// [a]*? => a*? /// (?>[a]*) => (?>a*) /// [^a] => ^a /// []* => Nothing /// </remarks> private RegexNode ReduceSet() { // Extract empty-set, one, and not-one case as special Debug.Assert(Kind is RegexNodeKind.Set or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy); Debug.Assert(!string.IsNullOrEmpty(Str)); if (RegexCharClass.IsEmpty(Str)) { Kind = RegexNodeKind.Nothing; Str = null; } else if (RegexCharClass.IsSingleton(Str)) { Ch = RegexCharClass.SingletonChar(Str); Str = null; Kind = Kind == RegexNodeKind.Set ? RegexNodeKind.One : Kind == RegexNodeKind.Setloop ? RegexNodeKind.Oneloop : Kind == RegexNodeKind.Setloopatomic ? RegexNodeKind.Oneloopatomic : RegexNodeKind.Onelazy; } else if (RegexCharClass.IsSingletonInverse(Str)) { Ch = RegexCharClass.SingletonChar(Str); Str = null; Kind = Kind == RegexNodeKind.Set ? RegexNodeKind.Notone : Kind == RegexNodeKind.Setloop ? RegexNodeKind.Notoneloop : Kind == RegexNodeKind.Setloopatomic ? RegexNodeKind.Notoneloopatomic : RegexNodeKind.Notonelazy; } return this; } /// <summary>Optimize an alternation.</summary> private RegexNode ReduceAlternation() { Debug.Assert(Kind == RegexNodeKind.Alternate); switch (ChildCount()) { case 0: return new RegexNode(RegexNodeKind.Nothing, Options); case 1: return Child(0); default: ReduceSingleLetterAndNestedAlternations(); RegexNode node = ReplaceNodeIfUnnecessary(); if (node.Kind == RegexNodeKind.Alternate) { node = ExtractCommonPrefixText(node); if (node.Kind == RegexNodeKind.Alternate) { node = ExtractCommonPrefixOneNotoneSet(node); if (node.Kind == RegexNodeKind.Alternate) { node = RemoveRedundantEmptiesAndNothings(node); } } } return node; } // This function performs two optimizations: // - Single-letter alternations can be replaced by faster set specifications // e.g. "a|b|c|def|g|h" -> "[a-c]|def|[gh]" // - Nested alternations with no intervening operators can be flattened: // e.g. "apple|(?:orange|pear)|grape" -> "apple|orange|pear|grape" void ReduceSingleLetterAndNestedAlternations() { bool wasLastSet = false; bool lastNodeCannotMerge = false; RegexOptions optionsLast = 0; RegexOptions optionsAt; int i; int j; RegexNode at; RegexNode prev; List<RegexNode> children = (List<RegexNode>)Children!; for (i = 0, j = 0; i < children.Count; i++, j++) { at = children[i]; if (j < i) children[j] = at; while (true) { if (at.Kind == RegexNodeKind.Alternate) { if (at.Children is List<RegexNode> atChildren) { for (int k = 0; k < atChildren.Count; k++) { atChildren[k].Parent = this; } children.InsertRange(i + 1, atChildren); } else { RegexNode atChild = (RegexNode)at.Children!; atChild.Parent = this; children.Insert(i + 1, atChild); } j--; } else if (at.Kind is RegexNodeKind.Set or RegexNodeKind.One) { // Cannot merge sets if L or I options differ, or if either are negated. 
optionsAt = at.Options & (RegexOptions.RightToLeft | RegexOptions.IgnoreCase); if (at.Kind == RegexNodeKind.Set) { if (!wasLastSet || optionsLast != optionsAt || lastNodeCannotMerge || !RegexCharClass.IsMergeable(at.Str!)) { wasLastSet = true; lastNodeCannotMerge = !RegexCharClass.IsMergeable(at.Str!); optionsLast = optionsAt; break; } } else if (!wasLastSet || optionsLast != optionsAt || lastNodeCannotMerge) { wasLastSet = true; lastNodeCannotMerge = false; optionsLast = optionsAt; break; } // The last node was a Set or a One, we're a Set or One and our options are the same. // Merge the two nodes. j--; prev = children[j]; RegexCharClass prevCharClass; if (prev.Kind == RegexNodeKind.One) { prevCharClass = new RegexCharClass(); prevCharClass.AddChar(prev.Ch); } else { prevCharClass = RegexCharClass.Parse(prev.Str!); } if (at.Kind == RegexNodeKind.One) { prevCharClass.AddChar(at.Ch); } else { RegexCharClass atCharClass = RegexCharClass.Parse(at.Str!); prevCharClass.AddCharClass(atCharClass); } prev.Kind = RegexNodeKind.Set; prev.Str = prevCharClass.ToStringClass(Options); if ((prev.Options & RegexOptions.IgnoreCase) != 0 && RegexCharClass.MakeCaseSensitiveIfPossible(prev.Str, RegexParser.GetTargetCulture(prev.Options)) is string newSetString) { prev.Str = newSetString; prev.Options &= ~RegexOptions.IgnoreCase; } } else if (at.Kind == RegexNodeKind.Nothing) { j--; } else { wasLastSet = false; lastNodeCannotMerge = false; } break; } } if (j < i) { children.RemoveRange(j, i - j); } } // This function optimizes out prefix nodes from alternation branches that are // the same across multiple contiguous branches. // e.g. \w12|\d34|\d56|\w78|\w90 => \w12|\d(?:34|56)|\w(?:78|90) static RegexNode ExtractCommonPrefixOneNotoneSet(RegexNode alternation) { Debug.Assert(alternation.Kind == RegexNodeKind.Alternate); Debug.Assert(alternation.Children is List<RegexNode> { Count: >= 2 }); var children = (List<RegexNode>)alternation.Children; // Only process left-to-right prefixes. if ((alternation.Options & RegexOptions.RightToLeft) != 0) { return alternation; } // Only handle the case where each branch is a concatenation foreach (RegexNode child in children) { if (child.Kind != RegexNodeKind.Concatenate || child.ChildCount() < 2) { return alternation; } } for (int startingIndex = 0; startingIndex < children.Count - 1; startingIndex++) { Debug.Assert(children[startingIndex].Children is List<RegexNode> { Count: >= 2 }); // Only handle the case where each branch begins with the same One, Notone, or Set (individual or loop). // Note that while we can do this for individual characters, fixed length loops, and atomic loops, doing // it for non-atomic variable length loops could change behavior as each branch could otherwise have a // different number of characters consumed by the loop based on what's after it. 
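// e.g. (?>\d*)34|(?>\d*)56 shares an identical atomic loop across branches and can become
// (?>\d*)(?:34|56), whereas \d+34|\d+56 is left alone: each branch's \d+ is a non-atomic
// variable-length loop (M != N), which the filter below skips.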
RegexNode required = children[startingIndex].Child(0); switch (required.Kind) { case RegexNodeKind.One or RegexNodeKind.Notone or RegexNodeKind.Set: case RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloopatomic: case RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop or RegexNodeKind.Onelazy or RegexNodeKind.Notonelazy or RegexNodeKind.Setlazy when required.M == required.N: break; default: continue; } // Only handle the case where each branch begins with the exact same node value int endingIndex = startingIndex + 1; for (; endingIndex < children.Count; endingIndex++) { RegexNode other = children[endingIndex].Child(0); if (required.Kind != other.Kind || required.Options != other.Options || required.M != other.M || required.N != other.N || required.Ch != other.Ch || required.Str != other.Str) { break; } } if (endingIndex - startingIndex <= 1) { // Nothing to extract from this starting index. continue; } // Remove the prefix node from every branch, adding it to a new alternation var newAlternate = new RegexNode(RegexNodeKind.Alternate, alternation.Options); for (int i = startingIndex; i < endingIndex; i++) { ((List<RegexNode>)children[i].Children!).RemoveAt(0); newAlternate.AddChild(children[i]); } // If this alternation is wrapped as atomic, we need to do the same for the new alternation. if (alternation.Parent is RegexNode { Kind: RegexNodeKind.Atomic } parent) { var atomic = new RegexNode(RegexNodeKind.Atomic, alternation.Options); atomic.AddChild(newAlternate); newAlternate = atomic; } // Now create a concatenation of the prefix node with the new alternation for the combined // branches, and replace all of the branches in this alternation with that new concatenation. var newConcat = new RegexNode(RegexNodeKind.Concatenate, alternation.Options); newConcat.AddChild(required); newConcat.AddChild(newAlternate); alternation.ReplaceChild(startingIndex, newConcat); children.RemoveRange(startingIndex + 1, endingIndex - startingIndex - 1); } return alternation.ReplaceNodeIfUnnecessary(); } // Removes unnecessary Empty and Nothing nodes from the alternation. A Nothing will never // match, so it can be removed entirely, and an Empty can be removed if there's a previous // Empty in the alternation: it's an extreme case of just having a repeated branch in an // alternation, and while we don't check for all duplicates, checking for empty is easy. static RegexNode RemoveRedundantEmptiesAndNothings(RegexNode node) { Debug.Assert(node.Kind == RegexNodeKind.Alternate); Debug.Assert(node.ChildCount() >= 2); var children = (List<RegexNode>)node.Children!; int i = 0, j = 0; bool seenEmpty = false; while (i < children.Count) { RegexNode child = children[i]; switch (child.Kind) { case RegexNodeKind.Empty when !seenEmpty: seenEmpty = true; goto default; case RegexNodeKind.Empty: case RegexNodeKind.Nothing: i++; break; default: children[j] = children[i]; i++; j++; break; } } children.RemoveRange(j, children.Count - j); return node.ReplaceNodeIfUnnecessary(); } // Analyzes all the branches of the alternation for text that's identical at the beginning // of every branch. That text is then pulled out into its own one or multi node in a // concatenation with the alternation (whose branches are updated to remove that prefix). // This is valuable for a few reasons. One, it exposes potentially more text to the // expression prefix analyzer used to influence FindFirstChar. Second, it exposes more // potential alternation optimizations, e.g. 
if the same prefix is followed in two branches // by sets that can be merged. Third, it reduces the amount of duplicated comparisons required // if we end up backtracking into subsequent branches. // e.g. abc|ade => a(?bc|de) static RegexNode ExtractCommonPrefixText(RegexNode alternation) { Debug.Assert(alternation.Kind == RegexNodeKind.Alternate); Debug.Assert(alternation.Children is List<RegexNode> { Count: >= 2 }); var children = (List<RegexNode>)alternation.Children; // To keep things relatively simple, we currently only handle: // - Left to right (e.g. we don't process alternations in lookbehinds) // - Branches that are one or multi nodes, or that are concatenations beginning with one or multi nodes. // - All branches having the same options. // Only extract left-to-right prefixes. if ((alternation.Options & RegexOptions.RightToLeft) != 0) { return alternation; } Span<char> scratchChar = stackalloc char[1]; ReadOnlySpan<char> startingSpan = stackalloc char[0]; for (int startingIndex = 0; startingIndex < children.Count - 1; startingIndex++) { // Process the first branch to get the maximum possible common string. RegexNode? startingNode = children[startingIndex].FindBranchOneOrMultiStart(); if (startingNode is null) { return alternation; } RegexOptions startingNodeOptions = startingNode.Options; startingSpan = startingNode.Str.AsSpan(); if (startingNode.Kind == RegexNodeKind.One) { scratchChar[0] = startingNode.Ch; startingSpan = scratchChar; } Debug.Assert(startingSpan.Length > 0); // Now compare the rest of the branches against it. int endingIndex = startingIndex + 1; for (; endingIndex < children.Count; endingIndex++) { // Get the starting node of the next branch. startingNode = children[endingIndex].FindBranchOneOrMultiStart(); if (startingNode is null || startingNode.Options != startingNodeOptions) { break; } // See if the new branch's prefix has a shared prefix with the current one. // If it does, shorten to that; if it doesn't, bail. if (startingNode.Kind == RegexNodeKind.One) { if (startingSpan[0] != startingNode.Ch) { break; } if (startingSpan.Length != 1) { startingSpan = startingSpan.Slice(0, 1); } } else { Debug.Assert(startingNode.Kind == RegexNodeKind.Multi); Debug.Assert(startingNode.Str!.Length > 0); int minLength = Math.Min(startingSpan.Length, startingNode.Str.Length); int c = 0; while (c < minLength && startingSpan[c] == startingNode.Str[c]) c++; if (c == 0) { break; } startingSpan = startingSpan.Slice(0, c); } } // When we get here, we have a starting string prefix shared by all branches // in the range [startingIndex, endingIndex). if (endingIndex - startingIndex <= 1) { // There's nothing to consolidate for this starting node. continue; } // We should be able to consolidate something for the nodes in the range [startingIndex, endingIndex). Debug.Assert(startingSpan.Length > 0); // Create a new node of the form: // Concatenation(prefix, Alternation(each | node | with | prefix | removed)) // that replaces all these branches in this alternation. var prefix = startingSpan.Length == 1 ? new RegexNode(RegexNodeKind.One, startingNodeOptions, startingSpan[0]) : new RegexNode(RegexNodeKind.Multi, startingNodeOptions, startingSpan.ToString()); var newAlternate = new RegexNode(RegexNodeKind.Alternate, startingNodeOptions); for (int i = startingIndex; i < endingIndex; i++) { RegexNode branch = children[i]; ProcessOneOrMulti(branch.Kind == RegexNodeKind.Concatenate ? 
branch.Child(0) : branch, startingSpan); branch = branch.Reduce(); newAlternate.AddChild(branch); // Remove the starting text from the one or multi node. This may end up changing // the type of the node to be Empty if the starting text matches the node's full value. static void ProcessOneOrMulti(RegexNode node, ReadOnlySpan<char> startingSpan) { if (node.Kind == RegexNodeKind.One) { Debug.Assert(startingSpan.Length == 1); Debug.Assert(startingSpan[0] == node.Ch); node.Kind = RegexNodeKind.Empty; node.Ch = '\0'; } else { Debug.Assert(node.Kind == RegexNodeKind.Multi); Debug.Assert(node.Str.AsSpan().StartsWith(startingSpan, StringComparison.Ordinal)); if (node.Str!.Length == startingSpan.Length) { node.Kind = RegexNodeKind.Empty; node.Str = null; } else if (node.Str.Length - 1 == startingSpan.Length) { node.Kind = RegexNodeKind.One; node.Ch = node.Str[node.Str.Length - 1]; node.Str = null; } else { node.Str = node.Str.Substring(startingSpan.Length); } } } } if (alternation.Parent is RegexNode parent && parent.Kind == RegexNodeKind.Atomic) { var atomic = new RegexNode(RegexNodeKind.Atomic, startingNodeOptions); atomic.AddChild(newAlternate); newAlternate = atomic; } var newConcat = new RegexNode(RegexNodeKind.Concatenate, startingNodeOptions); newConcat.AddChild(prefix); newConcat.AddChild(newAlternate); alternation.ReplaceChild(startingIndex, newConcat); children.RemoveRange(startingIndex + 1, endingIndex - startingIndex - 1); } return alternation.ChildCount() == 1 ? alternation.Child(0) : alternation; } } /// <summary> /// Finds the starting one or multi of the branch, if it has one; otherwise, returns null. /// For simplicity, this only considers branches that are One or Multi, or a Concatenation /// beginning with a One or Multi. We don't traverse more than one level to avoid the /// complication of then having to later update that hierarchy when removing the prefix, /// but it could be done in the future if proven beneficial enough. /// </summary> public RegexNode? FindBranchOneOrMultiStart() { RegexNode branch = Kind == RegexNodeKind.Concatenate ? Child(0) : this; return branch.Kind is RegexNodeKind.One or RegexNodeKind.Multi ? branch : null; } /// <summary>Same as <see cref="FindBranchOneOrMultiStart"/> but also for Sets.</summary> public RegexNode? FindBranchOneMultiOrSetStart() { RegexNode branch = Kind == RegexNodeKind.Concatenate ? Child(0) : this; return branch.Kind is RegexNodeKind.One or RegexNodeKind.Multi or RegexNodeKind.Set ? branch : null; } /// <summary>Gets the character that begins a One or Multi.</summary> public char FirstCharOfOneOrMulti() { Debug.Assert(Kind is RegexNodeKind.One or RegexNodeKind.Multi); Debug.Assert((Options & RegexOptions.RightToLeft) == 0); return Kind == RegexNodeKind.One ? Ch : Str![0]; } /// <summary>Finds the guaranteed beginning literal(s) of the node, or null if none exists.</summary> public (char Char, string? String, string? SetChars)? FindStartingLiteral(int maxSetCharacters = 5) // 5 is max optimized by IndexOfAny today { Debug.Assert(maxSetCharacters >= 0 && maxSetCharacters <= 128, $"{nameof(maxSetCharacters)} == {maxSetCharacters} should be small enough to be stack allocated."); RegexNode? 
node = this; while (true) { if (node is not null && (node.Options & RegexOptions.RightToLeft) == 0) { switch (node.Kind) { case RegexNodeKind.One: case RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy when node.M > 0: if ((node.Options & RegexOptions.IgnoreCase) == 0 || !RegexCharClass.ParticipatesInCaseConversion(node.Ch)) { return (node.Ch, null, null); } break; case RegexNodeKind.Multi: if ((node.Options & RegexOptions.IgnoreCase) == 0 || !RegexCharClass.ParticipatesInCaseConversion(node.Str.AsSpan())) { return ('\0', node.Str, null); } break; case RegexNodeKind.Set: case RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy when node.M > 0: Span<char> setChars = stackalloc char[maxSetCharacters]; int numChars; if (!RegexCharClass.IsNegated(node.Str!) && (numChars = RegexCharClass.GetSetChars(node.Str!, setChars)) != 0) { setChars = setChars.Slice(0, numChars); if ((node.Options & RegexOptions.IgnoreCase) == 0 || !RegexCharClass.ParticipatesInCaseConversion(setChars)) { return ('\0', null, setChars.ToString()); } } break; case RegexNodeKind.Atomic: case RegexNodeKind.Concatenate: case RegexNodeKind.Capture: case RegexNodeKind.Group: case RegexNodeKind.Loop or RegexNodeKind.Lazyloop when node.M > 0: case RegexNodeKind.PositiveLookaround: node = node.Child(0); continue; } } return null; } } /// <summary> /// Optimizes a concatenation by coalescing adjacent characters and strings, /// coalescing adjacent loops, converting loops to be atomic where applicable, /// and removing the concatenation itself if it's unnecessary. /// </summary> private RegexNode ReduceConcatenation() { Debug.Assert(Kind == RegexNodeKind.Concatenate); // If the concat node has zero or only one child, get rid of the concat. switch (ChildCount()) { case 0: return new RegexNode(RegexNodeKind.Empty, Options); case 1: return Child(0); } // Coalesce adjacent loops. This helps to minimize work done by the interpreter, minimize code gen, // and also help to reduce catastrophic backtracking. ReduceConcatenationWithAdjacentLoops(); // Coalesce adjacent characters/strings. This is done after the adjacent loop coalescing so that // a One adjacent to both a Multi and a Loop prefers being folded into the Loop rather than into // the Multi. Doing so helps with auto-atomicity when it's later applied. ReduceConcatenationWithAdjacentStrings(); // If the concatenation is now empty, return an empty node, or if it's got a single child, return that child. // Otherwise, return this. return ReplaceNodeIfUnnecessary(); } /// <summary> /// Combine adjacent characters/strings. /// e.g. 
(?:abc)(?:def) -> abcdef /// </summary> private void ReduceConcatenationWithAdjacentStrings() { Debug.Assert(Kind == RegexNodeKind.Concatenate); Debug.Assert(Children is List<RegexNode>); bool wasLastString = false; RegexOptions optionsLast = 0; int i, j; List<RegexNode> children = (List<RegexNode>)Children!; for (i = 0, j = 0; i < children.Count; i++, j++) { RegexNode at = children[i]; if (j < i) { children[j] = at; } if (at.Kind == RegexNodeKind.Concatenate && ((at.Options & RegexOptions.RightToLeft) == (Options & RegexOptions.RightToLeft))) { if (at.Children is List<RegexNode> atChildren) { for (int k = 0; k < atChildren.Count; k++) { atChildren[k].Parent = this; } children.InsertRange(i + 1, atChildren); } else { RegexNode atChild = (RegexNode)at.Children!; atChild.Parent = this; children.Insert(i + 1, atChild); } j--; } else if (at.Kind is RegexNodeKind.Multi or RegexNodeKind.One) { // Cannot merge strings if L or I options differ RegexOptions optionsAt = at.Options & (RegexOptions.RightToLeft | RegexOptions.IgnoreCase); if (!wasLastString || optionsLast != optionsAt) { wasLastString = true; optionsLast = optionsAt; continue; } RegexNode prev = children[--j]; if (prev.Kind == RegexNodeKind.One) { prev.Kind = RegexNodeKind.Multi; prev.Str = prev.Ch.ToString(); } if ((optionsAt & RegexOptions.RightToLeft) == 0) { prev.Str = (at.Kind == RegexNodeKind.One) ? $"{prev.Str}{at.Ch}" : prev.Str + at.Str; } else { prev.Str = (at.Kind == RegexNodeKind.One) ? $"{at.Ch}{prev.Str}" : at.Str + prev.Str; } } else if (at.Kind == RegexNodeKind.Empty) { j--; } else { wasLastString = false; } } if (j < i) { children.RemoveRange(j, i - j); } } /// <summary> /// Combine adjacent loops. /// e.g. a*a*a* => a* /// e.g. a+ab => a{2,}b /// </summary> private void ReduceConcatenationWithAdjacentLoops() { Debug.Assert(Kind == RegexNodeKind.Concatenate); Debug.Assert(Children is List<RegexNode>); var children = (List<RegexNode>)Children!; int current = 0, next = 1, nextSave = 1; while (next < children.Count) { RegexNode currentNode = children[current]; RegexNode nextNode = children[next]; if (currentNode.Options == nextNode.Options) { static bool CanCombineCounts(int nodeMin, int nodeMax, int nextMin, int nextMax) { // We shouldn't have an infinite minimum; bail if we find one. Also check for the // degenerate case where we'd make the min overflow or go infinite when it wasn't already. if (nodeMin == int.MaxValue || nextMin == int.MaxValue || (uint)nodeMin + (uint)nextMin >= int.MaxValue) { return false; } // Similar overflow / go infinite check for max (which can be infinite). if (nodeMax != int.MaxValue && nextMax != int.MaxValue && (uint)nodeMax + (uint)nextMax >= int.MaxValue) { return false; } return true; } switch (currentNode.Kind) { // Coalescing a loop with its same type case RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Notonelazy when nextNode.Kind == currentNode.Kind && currentNode.Ch == nextNode.Ch: case RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy when nextNode.Kind == currentNode.Kind && currentNode.Str == nextNode.Str: if (CanCombineCounts(currentNode.M, currentNode.N, nextNode.M, nextNode.N)) { currentNode.M += nextNode.M; if (currentNode.N != int.MaxValue) { currentNode.N = nextNode.N == int.MaxValue ? 
int.MaxValue : currentNode.N + nextNode.N; } next++; continue; } break; // Coalescing a loop with an additional item of the same type case RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy when nextNode.Kind == RegexNodeKind.One && currentNode.Ch == nextNode.Ch: case RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Notonelazy when nextNode.Kind == RegexNodeKind.Notone && currentNode.Ch == nextNode.Ch: case RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy when nextNode.Kind == RegexNodeKind.Set && currentNode.Str == nextNode.Str: if (CanCombineCounts(currentNode.M, currentNode.N, 1, 1)) { currentNode.M++; if (currentNode.N != int.MaxValue) { currentNode.N++; } next++; continue; } break; // Coalescing a loop with a subsequent string case RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy when nextNode.Kind == RegexNodeKind.Multi && currentNode.Ch == nextNode.Str![0]: { // Determine how many of the multi's characters can be combined. // We already checked for the first, so we know it's at least one. int matchingCharsInMulti = 1; while (matchingCharsInMulti < nextNode.Str.Length && currentNode.Ch == nextNode.Str[matchingCharsInMulti]) { matchingCharsInMulti++; } if (CanCombineCounts(currentNode.M, currentNode.N, matchingCharsInMulti, matchingCharsInMulti)) { // Update the loop's bounds to include those characters from the multi currentNode.M += matchingCharsInMulti; if (currentNode.N != int.MaxValue) { currentNode.N += matchingCharsInMulti; } // If it was the full multi, skip/remove the multi and continue processing this loop. if (nextNode.Str.Length == matchingCharsInMulti) { next++; continue; } // Otherwise, trim the characters from the multiple that were absorbed into the loop. // If it now only has a single character, it becomes a One. Debug.Assert(matchingCharsInMulti < nextNode.Str.Length); if (nextNode.Str.Length - matchingCharsInMulti == 1) { nextNode.Kind = RegexNodeKind.One; nextNode.Ch = nextNode.Str[nextNode.Str.Length - 1]; nextNode.Str = null; } else { nextNode.Str = nextNode.Str.Substring(matchingCharsInMulti); } } } break; // NOTE: We could add support for coalescing a string with a subsequent loop, but the benefits of that // are limited. Pulling a subsequent string's prefix back into the loop helps with making the loop atomic, // but if the loop is after the string, pulling the suffix of the string forward into the loop may actually // be a deoptimization as those characters could end up matching more slowly as part of loop matching. // Coalescing an individual item with a loop. case RegexNodeKind.One when (nextNode.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy) && currentNode.Ch == nextNode.Ch: case RegexNodeKind.Notone when (nextNode.Kind is RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Notonelazy) && currentNode.Ch == nextNode.Ch: case RegexNodeKind.Set when (nextNode.Kind is RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy) && currentNode.Str == nextNode.Str: if (CanCombineCounts(1, 1, nextNode.M, nextNode.N)) { currentNode.Kind = nextNode.Kind; currentNode.M = nextNode.M + 1; currentNode.N = nextNode.N == int.MaxValue ? int.MaxValue : nextNode.N + 1; next++; continue; } break; // Coalescing an individual item with another individual item. // We don't coalesce adjacent One nodes into a Oneloop as we'd rather they be joined into a Multi. 
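// e.g. [^a][^a] becomes [^a]{2} and \d\d becomes \d{2}, while aa is intentionally left for the
// subsequent adjacent-strings pass to fold into the multi "aa".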
case RegexNodeKind.Notone when nextNode.Kind == currentNode.Kind && currentNode.Ch == nextNode.Ch: case RegexNodeKind.Set when nextNode.Kind == RegexNodeKind.Set && currentNode.Str == nextNode.Str: currentNode.MakeRep(RegexNodeKind.Oneloop, 2, 2); next++; continue; } } children[nextSave++] = children[next]; current = next; next++; } if (nextSave < children.Count) { children.RemoveRange(nextSave, children.Count - nextSave); } } /// <summary> /// Finds {one/notone/set}loop nodes in the concatenation that can be automatically upgraded /// to {one/notone/set}loopatomic nodes. Such changes avoid potential useless backtracking. /// e.g. A*B (where sets A and B don't overlap) => (?>A*)B. /// </summary> private void FindAndMakeLoopsAtomic() { Debug.Assert((Options & RegexOptions.NonBacktracking) == 0, "Atomic groups aren't supported and don't help performance with NonBacktracking"); if (!StackHelper.TryEnsureSufficientExecutionStack()) { // If we're too deep on the stack, give up optimizing further. return; } if ((Options & RegexOptions.RightToLeft) != 0) { // RTL is so rare, we don't need to spend additional time/code optimizing for it. return; } // For all node types that have children, recur into each of those children. int childCount = ChildCount(); if (childCount != 0) { for (int i = 0; i < childCount; i++) { Child(i).FindAndMakeLoopsAtomic(); } } // If this isn't a concatenation, nothing more to do. if (Kind is not RegexNodeKind.Concatenate) { return; } // This is a concatenation. Iterate through each pair of nodes in the concatenation seeing whether we can // make the first node (or its right-most child) atomic based on the second node (or its left-most child). Debug.Assert(Children is List<RegexNode>); var children = (List<RegexNode>)Children; for (int i = 0; i < childCount - 1; i++) { ProcessNode(children[i], children[i + 1]); static void ProcessNode(RegexNode node, RegexNode subsequent) { if (!StackHelper.TryEnsureSufficientExecutionStack()) { // If we can't recur further, just stop optimizing. return; } // Skip down the node past irrelevant nodes. while (true) { // We can always recur into captures and into the last node of concatenations. if (node.Kind is RegexNodeKind.Capture or RegexNodeKind.Concatenate) { node = node.Child(node.ChildCount() - 1); continue; } // For loops with at least one guaranteed iteration, we can recur into them, but // we need to be careful not to just always do so; the ending node of a loop can only // be made atomic if what comes after the loop but also the beginning of the loop are // compatible for the optimization. if (node.Kind == RegexNodeKind.Loop) { RegexNode? loopDescendent = node.FindLastExpressionInLoopForAutoAtomic(); if (loopDescendent != null) { node = loopDescendent; continue; } } // Can't skip any further. break; } // If the node can be changed to atomic based on what comes after it, do so. switch (node.Kind) { case RegexNodeKind.Oneloop or RegexNodeKind.Notoneloop or RegexNodeKind.Setloop when CanBeMadeAtomic(node, subsequent, allowSubsequentIteration: true): node.MakeLoopAtomic(); break; case RegexNodeKind.Alternate or RegexNodeKind.BackreferenceConditional or RegexNodeKind.ExpressionConditional: // In the case of alternation, we can't change the alternation node itself // based on what comes after it (at least not with more complicated analysis // that factors in all branches together), but we can look at each individual // branch, and analyze ending loops in each branch individually to see if they // can be made atomic. 
Then if we do end up backtracking into the alternation, // we at least won't need to backtrack into that loop. The same is true for // conditionals, though we don't want to process the condition expression // itself, as it's already considered atomic and handled as part of ReduceExpressionConditional. { int alternateBranches = node.ChildCount(); for (int b = node.Kind == RegexNodeKind.ExpressionConditional ? 1 : 0; b < alternateBranches; b++) { ProcessNode(node.Child(b), subsequent); } } break; } } } } /// <summary> /// Recurs into the last expression of a loop node, looking to see if it can find a node /// that could be made atomic _assuming_ the conditions exist for it with the loop's ancestors. /// </summary> /// <returns>The found node that should be explored further for auto-atomicity; null if it doesn't exist.</returns> private RegexNode? FindLastExpressionInLoopForAutoAtomic() { RegexNode node = this; Debug.Assert(node.Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop); // Start by looking at the loop's sole child. node = node.Child(0); // Skip past captures. while (node.Kind == RegexNodeKind.Capture) { node = node.Child(0); } // If the loop's body is a concatenate, we can skip to its last child iff that // last child doesn't conflict with the first child, since this whole concatenation // could be repeated, such that the first node ends up following the last. For // example, in the expression (a+[def])*, the last child is [def] and the first is // a+, which can't possibly overlap with [def]. In contrast, if we had (a+[ade])*, // [ade] could potentially match the starting 'a'. if (node.Kind == RegexNodeKind.Concatenate) { int concatCount = node.ChildCount(); RegexNode lastConcatChild = node.Child(concatCount - 1); if (CanBeMadeAtomic(lastConcatChild, node.Child(0), allowSubsequentIteration: false)) { return lastConcatChild; } } // Otherwise, the loop has nothing that can participate in auto-atomicity. return null; } /// <summary>Optimizations for positive and negative lookaheads/behinds.</summary> private RegexNode ReduceLookaround() { Debug.Assert(Kind is RegexNodeKind.PositiveLookaround or RegexNodeKind.NegativeLookaround); Debug.Assert(ChildCount() == 1); // A lookaround is a zero-width atomic assertion. // As it's atomic, nothing will backtrack into it, and we can // eliminate any ending backtracking from it. EliminateEndingBacktracking(); // A positive lookaround wrapped around an empty is a nop, and we can reduce it // to simply Empty. A developer typically doesn't write this, but rather it evolves // due to optimizations resulting in empty. // A negative lookaround wrapped around an empty child, i.e. (?!), is // sometimes used as a way to insert a guaranteed no-match into the expression, // often as part of a conditional. We can reduce it to simply Nothing. if (Child(0).Kind == RegexNodeKind.Empty) { Kind = Kind == RegexNodeKind.PositiveLookaround ? RegexNodeKind.Empty : RegexNodeKind.Nothing; Children = null; } return this; } /// <summary>Optimizations for backreference conditionals.</summary> private RegexNode ReduceBackreferenceConditional() { Debug.Assert(Kind == RegexNodeKind.BackreferenceConditional); Debug.Assert(ChildCount() is 1 or 2); // This isn't so much an optimization as it is changing the tree for consistency. We want // all engines to be able to trust that every backreference conditional will have two children, // even though it's optional in the syntax. If it's missing a "not matched" branch, // we add one that will match empty. 
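// e.g. (?(1)yes) is normalized to the equivalent of (?(1)yes|), with an Empty node appended as the
// "not matched" branch.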
if (ChildCount() == 1) { AddChild(new RegexNode(RegexNodeKind.Empty, Options)); } return this; } /// <summary>Optimizations for expression conditionals.</summary> private RegexNode ReduceExpressionConditional() { Debug.Assert(Kind == RegexNodeKind.ExpressionConditional); Debug.Assert(ChildCount() is 2 or 3); // This isn't so much an optimization as it is changing the tree for consistency. We want // all engines to be able to trust that every expression conditional will have three children, // even though it's optional in the syntax. If it's missing a "not matched" branch, // we add one that will match empty. if (ChildCount() == 2) { AddChild(new RegexNode(RegexNodeKind.Empty, Options)); } // It's common for the condition to be an explicit positive lookahead, as specifying // that eliminates any ambiguity in syntax as to whether the expression is to be matched // as an expression or to be a reference to a capture group. After parsing, however, // there's no ambiguity, and we can remove an extra level of positive lookahead, as the // engines need to treat the condition as a zero-width positive, atomic assertion regardless. RegexNode condition = Child(0); if (condition.Kind == RegexNodeKind.PositiveLookaround && (condition.Options & RegexOptions.RightToLeft) == 0) { ReplaceChild(0, condition.Child(0)); } // We can also eliminate any ending backtracking in the condition, as the condition // is considered to be a positive lookahead, which is an atomic zero-width assertion. condition = Child(0); condition.EliminateEndingBacktracking(); return this; } /// <summary> /// Determines whether node can be switched to an atomic loop. Subsequent is the node /// immediately after 'node'. /// </summary> private static bool CanBeMadeAtomic(RegexNode node, RegexNode subsequent, bool allowSubsequentIteration) { if (!StackHelper.TryEnsureSufficientExecutionStack()) { // If we can't recur further, just stop optimizing. return false; } // In most case, we'll simply check the node against whatever subsequent is. However, in case // subsequent ends up being a loop with a min bound of 0, we'll also need to evaluate the node // against whatever comes after subsequent. In that case, we'll walk the tree to find the // next subsequent, and we'll loop around against to perform the comparison again. while (true) { // Skip the successor down to the closest node that's guaranteed to follow it. int childCount; while ((childCount = subsequent.ChildCount()) > 0) { Debug.Assert(subsequent.Kind != RegexNodeKind.Group); switch (subsequent.Kind) { case RegexNodeKind.Concatenate: case RegexNodeKind.Capture: case RegexNodeKind.Atomic: case RegexNodeKind.PositiveLookaround when (subsequent.Options & RegexOptions.RightToLeft) == 0: // only lookaheads, not lookbehinds (represented as RTL PositiveLookaround nodes) case RegexNodeKind.Loop or RegexNodeKind.Lazyloop when subsequent.M > 0: subsequent = subsequent.Child(0); continue; } break; } // If the two nodes don't agree on options in any way, don't try to optimize them. // TODO: Remove this once https://github.com/dotnet/runtime/issues/61048 is implemented. if (node.Options != subsequent.Options) { return false; } // If the successor is an alternation, all of its children need to be evaluated, since any of them // could come after this node. If any of them fail the optimization, then the whole node fails. 
// This applies to expression conditionals as well, as long as they have both a yes and a no branch (if there's // only a yes branch, we'd need to also check whatever comes after the conditional). It doesn't apply to // backreference conditionals, as the condition itself is unknown statically and could overlap with the // loop being considered for atomicity. switch (subsequent.Kind) { case RegexNodeKind.Alternate: case RegexNodeKind.ExpressionConditional when childCount == 3: // condition, yes, and no branch for (int i = 0; i < childCount; i++) { if (!CanBeMadeAtomic(node, subsequent.Child(i), allowSubsequentIteration)) { return false; } } return true; } // If this node is a {one/notone/set}loop, see if it overlaps with its successor in the concatenation. // If it doesn't, then we can upgrade it to being a {one/notone/set}loopatomic. // Doing so avoids unnecessary backtracking. switch (node.Kind) { case RegexNodeKind.Oneloop: switch (subsequent.Kind) { case RegexNodeKind.One when node.Ch != subsequent.Ch: case RegexNodeKind.Notone when node.Ch == subsequent.Ch: case RegexNodeKind.Set when !RegexCharClass.CharInClass(node.Ch, subsequent.Str!): case RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic when subsequent.M > 0 && node.Ch != subsequent.Ch: case RegexNodeKind.Notonelazy or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic when subsequent.M > 0 && node.Ch == subsequent.Ch: case RegexNodeKind.Setlazy or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic when subsequent.M > 0 && !RegexCharClass.CharInClass(node.Ch, subsequent.Str!): case RegexNodeKind.Multi when node.Ch != subsequent.Str![0]: case RegexNodeKind.End: case RegexNodeKind.EndZ or RegexNodeKind.Eol when node.Ch != '\n': case RegexNodeKind.Boundary when RegexCharClass.IsBoundaryWordChar(node.Ch): case RegexNodeKind.NonBoundary when !RegexCharClass.IsBoundaryWordChar(node.Ch): case RegexNodeKind.ECMABoundary when RegexCharClass.IsECMAWordChar(node.Ch): case RegexNodeKind.NonECMABoundary when !RegexCharClass.IsECMAWordChar(node.Ch): return true; case RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic when subsequent.M == 0 && node.Ch != subsequent.Ch: case RegexNodeKind.Notonelazy or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic when subsequent.M == 0 && node.Ch == subsequent.Ch: case RegexNodeKind.Setlazy or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic when subsequent.M == 0 && !RegexCharClass.CharInClass(node.Ch, subsequent.Str!): // The loop can be made atomic based on this subsequent node, but we'll need to evaluate the next one as well. break; default: return false; } break; case RegexNodeKind.Notoneloop: switch (subsequent.Kind) { case RegexNodeKind.One when node.Ch == subsequent.Ch: case RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic when subsequent.M > 0 && node.Ch == subsequent.Ch: case RegexNodeKind.Multi when node.Ch == subsequent.Str![0]: case RegexNodeKind.End: return true; case RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic when subsequent.M == 0 && node.Ch == subsequent.Ch: // The loop can be made atomic based on this subsequent node, but we'll need to evaluate the next one as well. 
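// e.g. in [^a]*a*b, the middle a* has a zero minimum, so deciding whether [^a]* can be made atomic
// requires walking past it and also comparing [^a]* against the b that follows.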
break; default: return false; } break; case RegexNodeKind.Setloop: switch (subsequent.Kind) { case RegexNodeKind.One when !RegexCharClass.CharInClass(subsequent.Ch, node.Str!): case RegexNodeKind.Set when !RegexCharClass.MayOverlap(node.Str!, subsequent.Str!): case RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic when subsequent.M > 0 && !RegexCharClass.CharInClass(subsequent.Ch, node.Str!): case RegexNodeKind.Setlazy or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic when subsequent.M > 0 && !RegexCharClass.MayOverlap(node.Str!, subsequent.Str!): case RegexNodeKind.Multi when !RegexCharClass.CharInClass(subsequent.Str![0], node.Str!): case RegexNodeKind.End: case RegexNodeKind.EndZ or RegexNodeKind.Eol when !RegexCharClass.CharInClass('\n', node.Str!): case RegexNodeKind.Boundary when node.Str is RegexCharClass.WordClass or RegexCharClass.DigitClass: case RegexNodeKind.NonBoundary when node.Str is RegexCharClass.NotWordClass or RegexCharClass.NotDigitClass: case RegexNodeKind.ECMABoundary when node.Str is RegexCharClass.ECMAWordClass or RegexCharClass.ECMADigitClass: case RegexNodeKind.NonECMABoundary when node.Str is RegexCharClass.NotECMAWordClass or RegexCharClass.NotDigitClass: return true; case RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic when subsequent.M == 0 && !RegexCharClass.CharInClass(subsequent.Ch, node.Str!): case RegexNodeKind.Setlazy or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic when subsequent.M == 0 && !RegexCharClass.MayOverlap(node.Str!, subsequent.Str!): // The loop can be made atomic based on this subsequent node, but we'll need to evaluate the next one as well. break; default: return false; } break; default: return false; } // We only get here if the node could be made atomic based on subsequent but subsequent has a lower bound of zero // and thus we need to move subsequent to be the next node in sequence and loop around to try again. Debug.Assert(subsequent.Kind is RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Notonelazy or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy); Debug.Assert(subsequent.M == 0); if (!allowSubsequentIteration) { return false; } // To be conservative, we only walk up through a very limited set of constructs (even though we may have walked // down through more, like loops), looking for the next concatenation that we're not at the end of, at // which point subsequent becomes whatever node is next in that concatenation. while (true) { RegexNode? parent = subsequent.Parent; switch (parent?.Kind) { case RegexNodeKind.Atomic: case RegexNodeKind.Alternate: case RegexNodeKind.Capture: subsequent = parent; continue; case RegexNodeKind.Concatenate: var peers = (List<RegexNode>)parent.Children!; int currentIndex = peers.IndexOf(subsequent); Debug.Assert(currentIndex >= 0, "Node should have been in its parent's child list"); if (currentIndex + 1 == peers.Count) { subsequent = parent; continue; } else { subsequent = peers[currentIndex + 1]; break; } case null: // If we hit the root, we're at the end of the expression, at which point nothing could backtrack // in and we can declare success. return true; default: // Anything else, we don't know what to do, so we have to assume it could conflict with the loop. 
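// e.g. if the walk up reaches a Loop, Lazyloop, or lookaround parent, we give up rather than try to
// reason about what another iteration or the enclosing construct might match next.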
return false; } break; } } } /// <summary>Computes a min bound on the required length of any string that could possibly match.</summary> /// <returns>The min computed length. If the result is 0, there is no minimum we can enforce.</returns> /// <remarks> /// e.g. abc[def](ghijkl|mn) => 6 /// </remarks> public int ComputeMinLength() { if (!StackHelper.TryEnsureSufficientExecutionStack()) { // If we can't recur further, assume there's no minimum we can enforce. return 0; } switch (Kind) { case RegexNodeKind.One: case RegexNodeKind.Notone: case RegexNodeKind.Set: // Single character. return 1; case RegexNodeKind.Multi: // Every character in the string needs to match. return Str!.Length; case RegexNodeKind.Notonelazy: case RegexNodeKind.Notoneloop: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Onelazy: case RegexNodeKind.Oneloop: case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Setlazy: case RegexNodeKind.Setloop: case RegexNodeKind.Setloopatomic: // One character repeated at least M times. return M; case RegexNodeKind.Lazyloop: case RegexNodeKind.Loop: // A node graph repeated at least M times. return (int)Math.Min(int.MaxValue - 1, (long)M * Child(0).ComputeMinLength()); case RegexNodeKind.Alternate: // The minimum required length for any of the alternation's branches. { int childCount = ChildCount(); Debug.Assert(childCount >= 2); int min = Child(0).ComputeMinLength(); for (int i = 1; i < childCount && min > 0; i++) { min = Math.Min(min, Child(i).ComputeMinLength()); } return min; } case RegexNodeKind.BackreferenceConditional: // Minimum of its yes and no branches. The backreference doesn't add to the length. return Math.Min(Child(0).ComputeMinLength(), Child(1).ComputeMinLength()); case RegexNodeKind.ExpressionConditional: // Minimum of its yes and no branches. The condition is a zero-width assertion. return Math.Min(Child(1).ComputeMinLength(), Child(2).ComputeMinLength()); case RegexNodeKind.Concatenate: // The sum of all of the concatenation's children. { long sum = 0; int childCount = ChildCount(); for (int i = 0; i < childCount; i++) { sum += Child(i).ComputeMinLength(); } return (int)Math.Min(int.MaxValue - 1, sum); } case RegexNodeKind.Atomic: case RegexNodeKind.Capture: case RegexNodeKind.Group: // For groups, we just delegate to the sole child. Debug.Assert(ChildCount() == 1); return Child(0).ComputeMinLength(); case RegexNodeKind.Empty: case RegexNodeKind.Nothing: case RegexNodeKind.UpdateBumpalong: // Nothing to match. In the future, we could potentially use Nothing to say that the min length // is infinite, but that would require a different structure, as that would only apply if the // Nothing match is required in all cases (rather than, say, as one branch of an alternation). case RegexNodeKind.Beginning: case RegexNodeKind.Bol: case RegexNodeKind.Boundary: case RegexNodeKind.ECMABoundary: case RegexNodeKind.End: case RegexNodeKind.EndZ: case RegexNodeKind.Eol: case RegexNodeKind.NonBoundary: case RegexNodeKind.NonECMABoundary: case RegexNodeKind.Start: case RegexNodeKind.NegativeLookaround: case RegexNodeKind.PositiveLookaround: // Zero-width case RegexNodeKind.Backreference: // Requires matching data available only at run-time. In the future, we could choose to find // and follow the capture group this aligns with, while being careful not to end up in an // infinite cycle. 
return 0; default: Debug.Fail($"Unknown node: {Kind}"); goto case RegexNodeKind.Empty; } } /// <summary>Computes a maximum length of any string that could possibly match.</summary> /// <returns>The maximum length of any string that could possibly match, or null if the length may not always be the same.</returns> /// <remarks> /// e.g. abc[def](gh|ijklmnop) => 12 /// </remarks> public int? ComputeMaxLength() { if (!StackHelper.TryEnsureSufficientExecutionStack()) { // If we can't recur further, assume there's no minimum we can enforce. return null; } switch (Kind) { case RegexNodeKind.One: case RegexNodeKind.Notone: case RegexNodeKind.Set: // Single character. return 1; case RegexNodeKind.Multi: // Every character in the string needs to match. return Str!.Length; case RegexNodeKind.Notonelazy or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Onelazy or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Setlazy or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic: // Return the max number of iterations if there's an upper bound, or null if it's infinite return N == int.MaxValue ? null : N; case RegexNodeKind.Loop or RegexNodeKind.Lazyloop: if (N != int.MaxValue) { // A node graph repeated a fixed number of times if (Child(0).ComputeMaxLength() is int childMaxLength) { long maxLength = (long)N * childMaxLength; if (maxLength < int.MaxValue) { return (int)maxLength; } } } return null; case RegexNodeKind.Alternate: // The maximum length of any child branch, as long as they all have one. { int childCount = ChildCount(); Debug.Assert(childCount >= 2); if (Child(0).ComputeMaxLength() is not int maxLength) { return null; } for (int i = 1; i < childCount; i++) { if (Child(i).ComputeMaxLength() is not int next) { return null; } maxLength = Math.Max(maxLength, next); } return maxLength; } case RegexNodeKind.BackreferenceConditional: case RegexNodeKind.ExpressionConditional: // The maximum length of either child branch, as long as they both have one.. The condition for an expression conditional is a zero-width assertion. { int i = Kind == RegexNodeKind.BackreferenceConditional ? 0 : 1; return Child(i).ComputeMaxLength() is int yes && Child(i + 1).ComputeMaxLength() is int no ? Math.Max(yes, no) : null; } case RegexNodeKind.Concatenate: // The sum of all of the concatenation's children's max lengths, as long as they all have one. { long sum = 0; int childCount = ChildCount(); for (int i = 0; i < childCount; i++) { if (Child(i).ComputeMaxLength() is not int length) { return null; } sum += length; } if (sum < int.MaxValue) { return (int)sum; } return null; } case RegexNodeKind.Atomic: case RegexNodeKind.Capture: // For groups, we just delegate to the sole child. Debug.Assert(ChildCount() == 1); return Child(0).ComputeMaxLength(); case RegexNodeKind.Empty: case RegexNodeKind.Nothing: case RegexNodeKind.UpdateBumpalong: case RegexNodeKind.Beginning: case RegexNodeKind.Bol: case RegexNodeKind.Boundary: case RegexNodeKind.ECMABoundary: case RegexNodeKind.End: case RegexNodeKind.EndZ: case RegexNodeKind.Eol: case RegexNodeKind.NonBoundary: case RegexNodeKind.NonECMABoundary: case RegexNodeKind.Start: case RegexNodeKind.PositiveLookaround: case RegexNodeKind.NegativeLookaround: // Zero-width return 0; case RegexNodeKind.Backreference: // Requires matching data available only at run-time. In the future, we could choose to find // and follow the capture group this aligns with, while being careful not to end up in an // infinite cycle. 
return null; default: Debug.Fail($"Unknown node: {Kind}"); goto case RegexNodeKind.Empty; } } /// <summary> /// Determine whether the specified child node is the beginning of a sequence that can /// trivially have length checks combined in order to avoid bounds checks. /// </summary> /// <param name="childIndex">The starting index of the child to check.</param> /// <param name="requiredLength">The sum of all the fixed lengths for the nodes in the sequence.</param> /// <param name="exclusiveEnd">The index of the node just after the last one in the sequence.</param> /// <returns>true if more than one node can have their length checks combined; otherwise, false.</returns> /// <remarks> /// There are additional node types for which we can prove a fixed length, e.g. examining all branches /// of an alternation and returning true if all their lengths are equal. However, the primary purpose /// of this method is to avoid bounds checks by consolidating length checks that guard accesses to /// strings/spans for which the JIT can see a fixed index within bounds, and alternations employ /// patterns that defeat that (e.g. reassigning the span in question). As such, the implementation /// remains focused on only a core subset of nodes that are a) likely to be used in concatenations and /// b) employ simple patterns of checks. /// </remarks> public bool TryGetJoinableLengthCheckChildRange(int childIndex, out int requiredLength, out int exclusiveEnd) { static bool CanJoinLengthCheck(RegexNode node) => node.Kind switch { RegexNodeKind.One or RegexNodeKind.Notone or RegexNodeKind.Set => true, RegexNodeKind.Multi => true, RegexNodeKind.Oneloop or RegexNodeKind.Onelazy or RegexNodeKind.Oneloopatomic or RegexNodeKind.Notoneloop or RegexNodeKind.Notonelazy or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Setloop or RegexNodeKind.Setlazy or RegexNodeKind.Setloopatomic when node.M == node.N => true, _ => false, }; RegexNode child = Child(childIndex); if (CanJoinLengthCheck(child)) { requiredLength = child.ComputeMinLength(); int childCount = ChildCount(); for (exclusiveEnd = childIndex + 1; exclusiveEnd < childCount; exclusiveEnd++) { child = Child(exclusiveEnd); if (!CanJoinLengthCheck(child)) { break; } requiredLength += child.ComputeMinLength(); } if (exclusiveEnd - childIndex > 1) { return true; } } requiredLength = 0; exclusiveEnd = 0; return false; } public RegexNode MakeQuantifier(bool lazy, int min, int max) { // Certain cases of repeaters (min == max) can be handled specially if (min == max) { switch (max) { case 0: // The node is repeated 0 times, so it's actually empty. return new RegexNode(RegexNodeKind.Empty, Options); case 1: // The node is repeated 1 time, so it's not actually a repeater. return this; case <= MultiVsRepeaterLimit when Kind == RegexNodeKind.One: // The same character is repeated a fixed number of times, so it's actually a multi. // While this could remain a repeater, multis are more readily optimized later in // processing. The counts used here in real-world expressions are invariably small (e.g. 4), // but we set an upper bound just to avoid creating really large strings. Debug.Assert(max >= 2); Kind = RegexNodeKind.Multi; Str = new string(Ch, max); Ch = '\0'; return this; } } switch (Kind) { case RegexNodeKind.One: case RegexNodeKind.Notone: case RegexNodeKind.Set: MakeRep(lazy ? RegexNodeKind.Onelazy : RegexNodeKind.Oneloop, min, max); return this; default: var result = new RegexNode(lazy ? 
RegexNodeKind.Lazyloop : RegexNodeKind.Loop, Options, min, max); result.AddChild(this); return result; } } public void AddChild(RegexNode newChild) { newChild.Parent = this; // so that the child can see its parent while being reduced newChild = newChild.Reduce(); newChild.Parent = this; // in case Reduce returns a different node that needs to be reparented if (Children is null) { Children = newChild; } else if (Children is RegexNode currentChild) { Children = new List<RegexNode>() { currentChild, newChild }; } else { ((List<RegexNode>)Children).Add(newChild); } } public void InsertChild(int index, RegexNode newChild) { Debug.Assert(Children is List<RegexNode>); newChild.Parent = this; // so that the child can see its parent while being reduced newChild = newChild.Reduce(); newChild.Parent = this; // in case Reduce returns a different node that needs to be reparented ((List<RegexNode>)Children).Insert(index, newChild); } public void ReplaceChild(int index, RegexNode newChild) { Debug.Assert(Children != null); Debug.Assert(index < ChildCount()); newChild.Parent = this; // so that the child can see its parent while being reduced newChild = newChild.Reduce(); newChild.Parent = this; // in case Reduce returns a different node that needs to be reparented if (Children is RegexNode) { Children = newChild; } else { ((List<RegexNode>)Children)[index] = newChild; } } public RegexNode Child(int i) => Children is RegexNode child ? child : ((List<RegexNode>)Children!)[i]; public int ChildCount() { if (Children is null) { return 0; } if (Children is List<RegexNode> children) { return children.Count; } Debug.Assert(Children is RegexNode); return 1; } // Determines whether the node supports a compilation / code generation strategy based on walking the node tree. // Also returns a human-readable string to explain the reason (it will be emitted by the source generator, hence // there's no need to localize). internal bool SupportsCompilation([NotNullWhen(false)] out string? reason) { if (!StackHelper.TryEnsureSufficientExecutionStack()) { reason = "run-time limits were exceeded"; return false; } // NonBacktracking isn't supported, nor RightToLeft. The latter applies to both the top-level // options as well as when used to specify positive and negative lookbehinds. if ((Options & RegexOptions.NonBacktracking) != 0) { reason = "RegexOptions.NonBacktracking was specified"; return false; } if ((Options & RegexOptions.RightToLeft) != 0) { reason = "RegexOptions.RightToLeft or a positive/negative lookbehind was used"; return false; } int childCount = ChildCount(); for (int i = 0; i < childCount; i++) { // The node isn't supported if any of its children aren't supported. if (!Child(i).SupportsCompilation(out reason)) { return false; } } // Supported. 
reason = null; return true; } /// <summary>Gets whether the node is a Set/Setloop/Setloopatomic/Setlazy node.</summary> public bool IsSetFamily => Kind is RegexNodeKind.Set or RegexNodeKind.Setloop or RegexNodeKind.Setloopatomic or RegexNodeKind.Setlazy; /// <summary>Gets whether the node is a One/Oneloop/Oneloopatomic/Onelazy node.</summary> public bool IsOneFamily => Kind is RegexNodeKind.One or RegexNodeKind.Oneloop or RegexNodeKind.Oneloopatomic or RegexNodeKind.Onelazy; /// <summary>Gets whether the node is a Notone/Notoneloop/Notoneloopatomic/Notonelazy node.</summary> public bool IsNotoneFamily => Kind is RegexNodeKind.Notone or RegexNodeKind.Notoneloop or RegexNodeKind.Notoneloopatomic or RegexNodeKind.Notonelazy; /// <summary>Gets whether this node is contained inside of a loop.</summary> public bool IsInLoop() { for (RegexNode? parent = Parent; parent is not null; parent = parent.Parent) { if (parent.Kind is RegexNodeKind.Loop or RegexNodeKind.Lazyloop) { return true; } } return false; } #if DEBUG [ExcludeFromCodeCoverage] public override string ToString() { RegexNode? curNode = this; int curChild = 0; var sb = new StringBuilder().AppendLine(curNode.Describe()); var stack = new List<int>(); while (true) { if (curChild < curNode!.ChildCount()) { stack.Add(curChild + 1); curNode = curNode.Child(curChild); curChild = 0; sb.Append(new string(' ', stack.Count * 2)).Append(curNode.Describe()).AppendLine(); } else { if (stack.Count == 0) { break; } curChild = stack[stack.Count - 1]; stack.RemoveAt(stack.Count - 1); curNode = curNode.Parent; } } return sb.ToString(); } [ExcludeFromCodeCoverage] private string Describe() { var sb = new StringBuilder(Kind.ToString()); if ((Options & RegexOptions.ExplicitCapture) != 0) sb.Append("-C"); if ((Options & RegexOptions.IgnoreCase) != 0) sb.Append("-I"); if ((Options & RegexOptions.RightToLeft) != 0) sb.Append("-L"); if ((Options & RegexOptions.Multiline) != 0) sb.Append("-M"); if ((Options & RegexOptions.Singleline) != 0) sb.Append("-S"); if ((Options & RegexOptions.IgnorePatternWhitespace) != 0) sb.Append("-X"); if ((Options & RegexOptions.ECMAScript) != 0) sb.Append("-E"); switch (Kind) { case RegexNodeKind.Oneloop: case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notoneloop: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Onelazy: case RegexNodeKind.Notonelazy: case RegexNodeKind.One: case RegexNodeKind.Notone: sb.Append(" '").Append(RegexCharClass.DescribeChar(Ch)).Append('\''); break; case RegexNodeKind.Capture: sb.Append(' ').Append($"index = {M}"); if (N != -1) { sb.Append($", unindex = {N}"); } break; case RegexNodeKind.Backreference: case RegexNodeKind.BackreferenceConditional: sb.Append(' ').Append($"index = {M}"); break; case RegexNodeKind.Multi: sb.Append(" \"").Append(Str).Append('"'); break; case RegexNodeKind.Set: case RegexNodeKind.Setloop: case RegexNodeKind.Setloopatomic: case RegexNodeKind.Setlazy: sb.Append(' ').Append(RegexCharClass.DescribeSet(Str!)); break; } switch (Kind) { case RegexNodeKind.Oneloop: case RegexNodeKind.Oneloopatomic: case RegexNodeKind.Notoneloop: case RegexNodeKind.Notoneloopatomic: case RegexNodeKind.Onelazy: case RegexNodeKind.Notonelazy: case RegexNodeKind.Setloop: case RegexNodeKind.Setloopatomic: case RegexNodeKind.Setlazy: case RegexNodeKind.Loop: case RegexNodeKind.Lazyloop: sb.Append( (M == 0 && N == int.MaxValue) ? "*" : (M == 0 && N == 1) ? "?" : (M == 1 && N == int.MaxValue) ? "+" : (N == int.MaxValue) ? $"{{{M}, *}}" : (N == M) ? 
$"{{{M}}}" : $"{{{M}, {N}}}"); break; } return sb.ToString(); } #endif } }
1
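Connecting the CanBeMadeAtomic logic above to observable behaviour: the rewrite is only allowed when the loop character cannot also start whatever follows it. The patterns and inputs below are my own illustration, not taken from the PR.

using System;
using System.Text.RegularExpressions;

// 'a' cannot match the following 'b', so "a+b" behaves exactly like the explicitly atomic "(?>a+)b":
Console.WriteLine(Regex.IsMatch("aaab", "a+b"));      // True
Console.WriteLine(Regex.IsMatch("aaab", "(?>a+)b"));  // True

// When the loop and its successor overlap, forcing atomicity would change the result,
// which is why CanBeMadeAtomic refuses cases like this one:
Console.WriteLine(Regex.IsMatch("aaab", "a+ab"));     // True  - the loop gives back one 'a'
Console.WriteLine(Regex.IsMatch("aaab", "(?>a+)ab")); // False - an atomic loop gives nothing back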
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
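A note on fix #2 in the description above (recurring into lookarounds during the ending-backtracking-elimination pass): the observable justification is that .NET does not backtrack into a lookaround once it has matched, so a loop that ends a lookaround can be made atomic without changing which inputs match. The patterns below are an illustrative sketch of that property, not code from the change.

using System;
using System.Text.RegularExpressions;

// The \w+ ends the lookahead, so marking it atomic cannot alter the outcome:
Console.WriteLine(Regex.IsMatch("abc", @"(?=a\w+)abc"));     // True
Console.WriteLine(Regex.IsMatch("abc", @"(?=a(?>\w+))abc")); // True
Console.WriteLine(Regex.IsMatch("a",   @"(?=a\w+)a"));       // False - \w+ needs a character after 'a'
Console.WriteLine(Regex.IsMatch("a",   @"(?=a(?>\w+))a"));   // False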
./src/libraries/System.Data.Common/tests/System/Xml/XmlDataDocumentTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma warning disable 0618 // use of obsolete methods using System.Data; using System.Data.Tests; using System.IO; using Xunit; namespace System.Xml.Tests { public class XmlDataDocumentTests { [Fact] public static void XmlDataDocument_DataSet() { DataTable parent = DataProvider.CreateParentDataTable(); DataTable child = DataProvider.CreateChildDataTable(); DataSet ds = new DataSet(); ds.Tables.Add(parent); ds.Tables.Add(child); XmlDataDocument doc = new XmlDataDocument(ds); Assert.IsType<DataSet>(doc.DataSet); Assert.Equal(ds, doc.DataSet); } [Fact] public static void XmlDataDocument_CloneNode() { DataSet ds = Create(); _ = ds.Tables[0].Rows[0]; XmlDataDocument doc = new XmlDataDocument(ds); ds.EnforceConstraints = false; XmlNode node = doc.CloneNode(deep: true); Assert.True(node.HasChildNodes); node = doc.CloneNode(deep: false); Assert.False(node.HasChildNodes); } [Fact] public static void XmlDataDocument_CreateElement() { XmlDataDocument doc = new XmlDataDocument(); XmlElement element = doc.CreateElement("prefix", "localName", "namespaceURI"); Assert.NotNull(element); Assert.Equal("prefix", element.Prefix); Assert.Equal("prefix:localName", element.Name); Assert.Equal("namespaceURI", element.NamespaceURI); } [Fact] public static void XmlDataDocument_CreateEntityReference() { XmlDataDocument doc = new XmlDataDocument(); Assert.Throws<NotSupportedException>(() => doc.CreateEntityReference("name")); } [Fact] public static void XmlDataDocument_GetElementById() { XmlDataDocument doc = new XmlDataDocument(); Assert.Throws<NotSupportedException>(() => doc.GetElementById("elemId")); } [Fact] public static void XmlDataDocument_GetElementFromRow() { DataSet ds = Create(); XmlDataDocument doc = new XmlDataDocument(ds); XmlElement element = doc.GetElementFromRow(ds.Tables[0].Rows[0]); Assert.NotNull(element); Assert.Equal("Test", element.Name); } [Fact] public static void XmlDataDocument_GetElementsByTagName() { XmlDataDocument doc = new XmlDataDocument(); XmlNodeList nodeList = doc.GetElementsByTagName("missingTag"); Assert.NotNull(nodeList); Assert.Equal(0, nodeList.Count); DataSet ds = Create(); doc = new XmlDataDocument(ds); nodeList = doc.GetElementsByTagName("Test"); Assert.NotNull(nodeList); Assert.Equal(1, nodeList.Count); Assert.True(nodeList[0].HasChildNodes); } [Fact] public static void XmlDataDocument_GetRowFromElement() { DataSet ds = Create(); DataRow dr = ds.Tables[0].Rows[0]; XmlDataDocument doc = new XmlDataDocument(ds); XmlElement xmlElement = doc.GetElementFromRow(dr); Assert.Equal(dr, doc.GetRowFromElement(xmlElement)); } [Fact] public static void XmlDataDocument_Load_Throws() { XmlDataDocument doc = new XmlDataDocument(); Assert.Throws<FileNotFoundException>(() => doc.Load("missingfile")); } [Fact] public static void XmlDataDocument_LoadXmlReader() { string xml = "<CustomTypesData>" + Environment.NewLine + "<CustomTypesTable>" + Environment.NewLine + "<Dummy>99</Dummy>" + Environment.NewLine + "</CustomTypesTable>" + Environment.NewLine + "</CustomTypesData>" + Environment.NewLine; StringReader sr = new StringReader(xml); XmlReader xr = new XmlTextReader(sr); XmlDataDocument doc = new XmlDataDocument(); doc.Load(xr); var nodeList = doc.GetElementsByTagName("CustomTypesData"); Assert.NotNull(nodeList); Assert.Equal(1, nodeList.Count); Assert.True(nodeList[0].HasChildNodes); Assert.Equal("CustomTypesData", nodeList[0].Name); } private static 
DataSet Create() { DataSet ds = new DataSet("Set"); DataTable dt = new DataTable("Test"); dt.Columns.Add("CustName", typeof(string)); dt.Columns.Add("Type", typeof(Type)); DataRow dr = dt.NewRow(); dr["CustName"] = DBNull.Value; dr["Type"] = typeof(DBNull); dt.Rows.Add(dr); ds.Tables.Add(dt); return ds; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma warning disable 0618 // use of obsolete methods using System.Data; using System.Data.Tests; using System.IO; using Xunit; namespace System.Xml.Tests { public class XmlDataDocumentTests { [Fact] public static void XmlDataDocument_DataSet() { DataTable parent = DataProvider.CreateParentDataTable(); DataTable child = DataProvider.CreateChildDataTable(); DataSet ds = new DataSet(); ds.Tables.Add(parent); ds.Tables.Add(child); XmlDataDocument doc = new XmlDataDocument(ds); Assert.IsType<DataSet>(doc.DataSet); Assert.Equal(ds, doc.DataSet); } [Fact] public static void XmlDataDocument_CloneNode() { DataSet ds = Create(); _ = ds.Tables[0].Rows[0]; XmlDataDocument doc = new XmlDataDocument(ds); ds.EnforceConstraints = false; XmlNode node = doc.CloneNode(deep: true); Assert.True(node.HasChildNodes); node = doc.CloneNode(deep: false); Assert.False(node.HasChildNodes); } [Fact] public static void XmlDataDocument_CreateElement() { XmlDataDocument doc = new XmlDataDocument(); XmlElement element = doc.CreateElement("prefix", "localName", "namespaceURI"); Assert.NotNull(element); Assert.Equal("prefix", element.Prefix); Assert.Equal("prefix:localName", element.Name); Assert.Equal("namespaceURI", element.NamespaceURI); } [Fact] public static void XmlDataDocument_CreateEntityReference() { XmlDataDocument doc = new XmlDataDocument(); Assert.Throws<NotSupportedException>(() => doc.CreateEntityReference("name")); } [Fact] public static void XmlDataDocument_GetElementById() { XmlDataDocument doc = new XmlDataDocument(); Assert.Throws<NotSupportedException>(() => doc.GetElementById("elemId")); } [Fact] public static void XmlDataDocument_GetElementFromRow() { DataSet ds = Create(); XmlDataDocument doc = new XmlDataDocument(ds); XmlElement element = doc.GetElementFromRow(ds.Tables[0].Rows[0]); Assert.NotNull(element); Assert.Equal("Test", element.Name); } [Fact] public static void XmlDataDocument_GetElementsByTagName() { XmlDataDocument doc = new XmlDataDocument(); XmlNodeList nodeList = doc.GetElementsByTagName("missingTag"); Assert.NotNull(nodeList); Assert.Equal(0, nodeList.Count); DataSet ds = Create(); doc = new XmlDataDocument(ds); nodeList = doc.GetElementsByTagName("Test"); Assert.NotNull(nodeList); Assert.Equal(1, nodeList.Count); Assert.True(nodeList[0].HasChildNodes); } [Fact] public static void XmlDataDocument_GetRowFromElement() { DataSet ds = Create(); DataRow dr = ds.Tables[0].Rows[0]; XmlDataDocument doc = new XmlDataDocument(ds); XmlElement xmlElement = doc.GetElementFromRow(dr); Assert.Equal(dr, doc.GetRowFromElement(xmlElement)); } [Fact] public static void XmlDataDocument_Load_Throws() { XmlDataDocument doc = new XmlDataDocument(); Assert.Throws<FileNotFoundException>(() => doc.Load("missingfile")); } [Fact] public static void XmlDataDocument_LoadXmlReader() { string xml = "<CustomTypesData>" + Environment.NewLine + "<CustomTypesTable>" + Environment.NewLine + "<Dummy>99</Dummy>" + Environment.NewLine + "</CustomTypesTable>" + Environment.NewLine + "</CustomTypesData>" + Environment.NewLine; StringReader sr = new StringReader(xml); XmlReader xr = new XmlTextReader(sr); XmlDataDocument doc = new XmlDataDocument(); doc.Load(xr); var nodeList = doc.GetElementsByTagName("CustomTypesData"); Assert.NotNull(nodeList); Assert.Equal(1, nodeList.Count); Assert.True(nodeList[0].HasChildNodes); Assert.Equal("CustomTypesData", nodeList[0].Name); } private static 
DataSet Create() { DataSet ds = new DataSet("Set"); DataTable dt = new DataTable("Test"); dt.Columns.Add("CustName", typeof(string)); dt.Columns.Add("Type", typeof(Type)); DataRow dr = dt.NewRow(); dr["CustName"] = DBNull.Value; dr["Type"] = typeof(DBNull); dt.Rows.Add(dr); ds.Tables.Add(dt); return ds; } } }
-1
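For the XmlDataDocument tests in the record above, here is a minimal standalone sketch of the DataSet/XML mapping they exercise; the table, column, and row values are arbitrary stand-ins.

using System;
using System.Data;
using System.Xml;

#pragma warning disable 0618 // XmlDataDocument is obsolete, as the test file itself notes

DataSet ds = new DataSet("Set");
DataTable dt = new DataTable("Test");
dt.Columns.Add("CustName", typeof(string));
dt.Rows.Add("Alice");
ds.Tables.Add(dt);

// The document mirrors the DataSet: a row surfaces as an element named after its table,
// and that element can be mapped back to the original row.
XmlDataDocument doc = new XmlDataDocument(ds);
XmlElement row = doc.GetElementFromRow(dt.Rows[0]);
Console.WriteLine(row.Name);                                 // Test
Console.WriteLine(doc.GetRowFromElement(row) == dt.Rows[0]); // True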
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/System.ComponentModel.TypeConverter/tests/ToolboxItemFilterAttributeTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using Xunit; namespace System.ComponentModel.Tests { public class ToolboxItemFilterAttributeTests { [Theory] [InlineData(null)] [InlineData("")] [InlineData("filterString")] public void Ctor_String(string filterString) { var attribute = new ToolboxItemFilterAttribute(filterString); Assert.Equal(filterString ?? string.Empty, attribute.FilterString); Assert.Equal(ToolboxItemFilterType.Allow, attribute.FilterType); } [Theory] [InlineData(null, ToolboxItemFilterType.Allow)] [InlineData("", ToolboxItemFilterType.Custom)] [InlineData("filterString", ToolboxItemFilterType.Prevent)] [InlineData("filterString", ToolboxItemFilterType.Require)] [InlineData("filterString", ToolboxItemFilterType.Allow - 1)] [InlineData("filterString", ToolboxItemFilterType.Require + 1)] public void Ctor_String_ToolboxItemFilterType(string filterString, ToolboxItemFilterType filterType) { var attribute = new ToolboxItemFilterAttribute(filterString, filterType); Assert.Equal(filterString ?? string.Empty, attribute.FilterString); Assert.Equal(filterType, attribute.FilterType); } [Theory] [InlineData(null, "System.ComponentModel.ToolboxItemFilterAttribute")] [InlineData("", "System.ComponentModel.ToolboxItemFilterAttribute")] [InlineData("filterString", "System.ComponentModel.ToolboxItemFilterAttributefilterString")] public void TypeId_ValidEditorBaseTypeName_ReturnsExcepted(string filterType, object expected) { var attribute = new ToolboxItemFilterAttribute(filterType); Assert.Equal(expected, attribute.TypeId); Assert.Same(attribute.TypeId, attribute.TypeId); } public static IEnumerable<object[]> Equals_TestData() { var attribute = new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Allow); yield return new object[] { attribute, attribute, true }; yield return new object[] { attribute, new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Allow), true }; yield return new object[] { attribute, new ToolboxItemFilterAttribute("filterstring", ToolboxItemFilterType.Allow), false }; yield return new object[] { attribute, new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Custom), false }; yield return new object[] { attribute, new object(), false }; yield return new object[] { attribute, null, false }; } [Theory] [MemberData(nameof(Equals_TestData))] public void Equals_Object_ReturnsExpected(ToolboxItemFilterAttribute attribute, object other, bool expected) { Assert.Equal(expected, attribute.Equals(other)); } [Fact] public void GetHashCode_Invoke_ReturnsConsistentValue() { var attribute = new ToolboxItemFilterAttribute("filterString"); Assert.Equal(attribute.GetHashCode(), attribute.GetHashCode()); } public static IEnumerable<object[]> Match_TestData() { var attribute = new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Allow); yield return new object[] { attribute, attribute, true }; yield return new object[] { attribute, new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Allow), true }; yield return new object[] { attribute, new ToolboxItemFilterAttribute("filterstring", ToolboxItemFilterType.Allow), false }; yield return new object[] { attribute, new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Custom), true }; yield return new object[] { attribute, new object(), false }; yield return new object[] { attribute, null, false }; } [Theory] 
[MemberData(nameof(Match_TestData))] public void Match_Object_ReturnsExpected(ToolboxItemFilterAttribute attribute, object other, bool expected) { Assert.Equal(expected, attribute.Match(other)); } [Theory] [InlineData(null, ToolboxItemFilterType.Allow, ",Allow")] [InlineData("", ToolboxItemFilterType.Custom, ",Custom")] [InlineData("filterString", ToolboxItemFilterType.Prevent, "filterString,Prevent")] [InlineData("filterString", ToolboxItemFilterType.Require, "filterString,Require")] [InlineData("filterString", ToolboxItemFilterType.Allow - 1, "filterString,")] [InlineData("filterString", ToolboxItemFilterType.Require + 1, "filterString,")] public void ToString_Invoke_ReturnsExpected(string filterString, ToolboxItemFilterType filterType, string expected) { var attribute = new ToolboxItemFilterAttribute(filterString, filterType); Assert.Equal(expected, attribute.ToString()); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using Xunit; namespace System.ComponentModel.Tests { public class ToolboxItemFilterAttributeTests { [Theory] [InlineData(null)] [InlineData("")] [InlineData("filterString")] public void Ctor_String(string filterString) { var attribute = new ToolboxItemFilterAttribute(filterString); Assert.Equal(filterString ?? string.Empty, attribute.FilterString); Assert.Equal(ToolboxItemFilterType.Allow, attribute.FilterType); } [Theory] [InlineData(null, ToolboxItemFilterType.Allow)] [InlineData("", ToolboxItemFilterType.Custom)] [InlineData("filterString", ToolboxItemFilterType.Prevent)] [InlineData("filterString", ToolboxItemFilterType.Require)] [InlineData("filterString", ToolboxItemFilterType.Allow - 1)] [InlineData("filterString", ToolboxItemFilterType.Require + 1)] public void Ctor_String_ToolboxItemFilterType(string filterString, ToolboxItemFilterType filterType) { var attribute = new ToolboxItemFilterAttribute(filterString, filterType); Assert.Equal(filterString ?? string.Empty, attribute.FilterString); Assert.Equal(filterType, attribute.FilterType); } [Theory] [InlineData(null, "System.ComponentModel.ToolboxItemFilterAttribute")] [InlineData("", "System.ComponentModel.ToolboxItemFilterAttribute")] [InlineData("filterString", "System.ComponentModel.ToolboxItemFilterAttributefilterString")] public void TypeId_ValidEditorBaseTypeName_ReturnsExcepted(string filterType, object expected) { var attribute = new ToolboxItemFilterAttribute(filterType); Assert.Equal(expected, attribute.TypeId); Assert.Same(attribute.TypeId, attribute.TypeId); } public static IEnumerable<object[]> Equals_TestData() { var attribute = new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Allow); yield return new object[] { attribute, attribute, true }; yield return new object[] { attribute, new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Allow), true }; yield return new object[] { attribute, new ToolboxItemFilterAttribute("filterstring", ToolboxItemFilterType.Allow), false }; yield return new object[] { attribute, new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Custom), false }; yield return new object[] { attribute, new object(), false }; yield return new object[] { attribute, null, false }; } [Theory] [MemberData(nameof(Equals_TestData))] public void Equals_Object_ReturnsExpected(ToolboxItemFilterAttribute attribute, object other, bool expected) { Assert.Equal(expected, attribute.Equals(other)); } [Fact] public void GetHashCode_Invoke_ReturnsConsistentValue() { var attribute = new ToolboxItemFilterAttribute("filterString"); Assert.Equal(attribute.GetHashCode(), attribute.GetHashCode()); } public static IEnumerable<object[]> Match_TestData() { var attribute = new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Allow); yield return new object[] { attribute, attribute, true }; yield return new object[] { attribute, new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Allow), true }; yield return new object[] { attribute, new ToolboxItemFilterAttribute("filterstring", ToolboxItemFilterType.Allow), false }; yield return new object[] { attribute, new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Custom), true }; yield return new object[] { attribute, new object(), false }; yield return new object[] { attribute, null, false }; } [Theory] 
[MemberData(nameof(Match_TestData))] public void Match_Object_ReturnsExpected(ToolboxItemFilterAttribute attribute, object other, bool expected) { Assert.Equal(expected, attribute.Match(other)); } [Theory] [InlineData(null, ToolboxItemFilterType.Allow, ",Allow")] [InlineData("", ToolboxItemFilterType.Custom, ",Custom")] [InlineData("filterString", ToolboxItemFilterType.Prevent, "filterString,Prevent")] [InlineData("filterString", ToolboxItemFilterType.Require, "filterString,Require")] [InlineData("filterString", ToolboxItemFilterType.Allow - 1, "filterString,")] [InlineData("filterString", ToolboxItemFilterType.Require + 1, "filterString,")] public void ToString_Invoke_ReturnsExpected(string filterString, ToolboxItemFilterType filterType, string expected) { var attribute = new ToolboxItemFilterAttribute(filterString, filterType); Assert.Equal(expected, attribute.ToString()); } } }
-1
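The Equals/Match test data in the record above encodes the attribute's one subtlety: Equals compares both FilterString and FilterType, while Match only compares FilterString. A minimal sketch of that difference:

using System;
using System.ComponentModel;

var allow  = new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Allow);
var custom = new ToolboxItemFilterAttribute("filterString", ToolboxItemFilterType.Custom);

Console.WriteLine(allow.Equals(custom)); // False - same string, different filter type
Console.WriteLine(allow.Match(custom));  // True  - Match only considers FilterString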
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/ModuleTokenResolver.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Concurrent; using System.Collections.Immutable; using System.Reflection.Metadata; using System.Reflection.Metadata.Ecma335; using Internal.JitInterface; using Internal.TypeSystem; using Internal.TypeSystem.Ecma; using Internal.CorConstants; namespace ILCompiler.DependencyAnalysis.ReadyToRun { /// <summary> /// This class is used to back-resolve typesystem elements from /// external version bubbles to references relative to the current /// versioning bubble. /// </summary> public class ModuleTokenResolver { /// <summary> /// Reverse lookup table mapping external types to reference tokens in the input modules. The table /// gets lazily initialized as various tokens are resolved in CorInfoImpl. /// </summary> private readonly ConcurrentDictionary<EcmaType, ModuleToken> _typeToRefTokens = new ConcurrentDictionary<EcmaType, ModuleToken>(); private readonly ConcurrentDictionary<FieldDesc, ModuleToken> _fieldToRefTokens = new ConcurrentDictionary<FieldDesc, ModuleToken>(); private readonly CompilationModuleGroup _compilationModuleGroup; private Func<EcmaModule, int> _moduleIndexLookup; public CompilerTypeSystemContext CompilerContext { get; } public ModuleTokenResolver(CompilationModuleGroup compilationModuleGroup, CompilerTypeSystemContext typeSystemContext) { _compilationModuleGroup = compilationModuleGroup; CompilerContext = typeSystemContext; } public void SetModuleIndexLookup(Func<EcmaModule, int> moduleIndexLookup) { _moduleIndexLookup = moduleIndexLookup; } public ModuleToken GetModuleTokenForType(EcmaType type, bool throwIfNotFound = true) { if (_compilationModuleGroup.VersionsWithType(type)) { return new ModuleToken(type.EcmaModule, (mdToken)MetadataTokens.GetToken(type.Handle)); } ModuleToken token; if (_typeToRefTokens.TryGetValue(type, out token)) { return token; } // If the token was not lazily mapped, search the input compilation set for a type reference token if (_compilationModuleGroup.TryGetModuleTokenForExternalType(type, out token)) { return token; } // Reverse lookup failed if (throwIfNotFound) { throw new NotImplementedException(type.ToString()); } else { return default(ModuleToken); } } public ModuleToken GetModuleTokenForMethod(MethodDesc method, bool throwIfNotFound = true) { method = method.GetCanonMethodTarget(CanonicalFormKind.Specific); if (_compilationModuleGroup.VersionsWithMethodBody(method) && method.GetTypicalMethodDefinition() is EcmaMethod ecmaMethod) { return new ModuleToken(ecmaMethod.Module, ecmaMethod.Handle); } // Reverse lookup failed if (throwIfNotFound) { throw new NotImplementedException(method.ToString()); } else { return default(ModuleToken); } } public ModuleToken GetModuleTokenForField(FieldDesc field, bool throwIfNotFound = true) { if (_compilationModuleGroup.VersionsWithType(field.OwningType) && field is EcmaField ecmaField) { return new ModuleToken(ecmaField.Module, ecmaField.Handle); } TypeDesc owningCanonType = field.OwningType.ConvertToCanonForm(CanonicalFormKind.Specific); FieldDesc canonField = field; if (owningCanonType != field.OwningType) { canonField = CompilerContext.GetFieldForInstantiatedType(field.GetTypicalFieldDefinition(), (InstantiatedType)owningCanonType); } ModuleToken token; if (_fieldToRefTokens.TryGetValue(canonField, out token)) { return token; } if (throwIfNotFound) { throw new NotImplementedException(field.ToString()); } else { return 
default(ModuleToken); } } public void AddModuleTokenForMethod(MethodDesc method, ModuleToken token) { if (token.TokenType == CorTokenType.mdtMethodSpec) { MethodSpecification methodSpec = token.MetadataReader.GetMethodSpecification((MethodSpecificationHandle)token.Handle); methodSpec.DecodeSignature<DummyTypeInfo, ModuleTokenResolver>(new TokenResolverProvider(this, token.Module), this); token = new ModuleToken(token.Module, methodSpec.Method); } if (token.TokenType == CorTokenType.mdtMemberRef) { MemberReference memberRef = token.MetadataReader.GetMemberReference((MemberReferenceHandle)token.Handle); EntityHandle owningTypeHandle = memberRef.Parent; TypeDesc owningType = (TypeDesc)token.Module.GetObject(owningTypeHandle, NotFoundBehavior.Throw); AddModuleTokenForType(owningType, new ModuleToken(token.Module, owningTypeHandle)); memberRef.DecodeMethodSignature<DummyTypeInfo, ModuleTokenResolver>(new TokenResolverProvider(this, token.Module), this); } if (token.TokenType == CorTokenType.mdtMethodDef) { MethodDefinition methodDef = token.MetadataReader.GetMethodDefinition((MethodDefinitionHandle)token.Handle); methodDef.DecodeSignature<DummyTypeInfo, ModuleTokenResolver>(new TokenResolverProvider(this, token.Module), this); } } private void AddModuleTokenForFieldReference(TypeDesc owningType, ModuleToken token) { MemberReference memberRef = token.MetadataReader.GetMemberReference((MemberReferenceHandle)token.Handle); EntityHandle owningTypeHandle = memberRef.Parent; AddModuleTokenForType(owningType, new ModuleToken(token.Module, owningTypeHandle)); memberRef.DecodeFieldSignature<DummyTypeInfo, ModuleTokenResolver>(new TokenResolverProvider(this, token.Module), this); } public void AddModuleTokenForField(FieldDesc field, ModuleToken token) { if (_compilationModuleGroup.VersionsWithType(field.OwningType) && field.OwningType is EcmaType) { // We don't need to store handles within the current compilation group // as we can read them directly from the ECMA objects. return; } TypeDesc owningCanonType = field.OwningType.ConvertToCanonForm(CanonicalFormKind.Specific); FieldDesc canonField = field; if (owningCanonType != field.OwningType) { canonField = CompilerContext.GetFieldForInstantiatedType(field.GetTypicalFieldDefinition(), (InstantiatedType)owningCanonType); } SetModuleTokenForTypeSystemEntity(_fieldToRefTokens, canonField, token); switch (token.TokenType) { case CorTokenType.mdtMemberRef: AddModuleTokenForFieldReference(owningCanonType, token); break; default: throw new NotImplementedException(); } } // Add TypeSystemEntity -> ModuleToken mapping to a ConcurrentDictionary. 
Using CompareTo sort the token used, so it will // be consistent in all runs of the compiler void SetModuleTokenForTypeSystemEntity<T>(ConcurrentDictionary<T, ModuleToken> dictionary, T tse, ModuleToken token) { if (!dictionary.TryAdd(tse, token)) { ModuleToken oldToken; do { // We will reach here, if the field already has a token if (!dictionary.TryGetValue(tse, out oldToken)) throw new InternalCompilerErrorException("TypeSystemEntity both present and not present in emission dictionary."); if (oldToken.CompareTo(token) <= 0) break; } while (dictionary.TryUpdate(tse, token, oldToken)); } } public void AddModuleTokenForType(TypeDesc type, ModuleToken token) { bool specialTypeFound = false; // Collect underlying type tokens for type specifications if (token.TokenType == CorTokenType.mdtTypeSpec) { TypeSpecification typeSpec = token.MetadataReader.GetTypeSpecification((TypeSpecificationHandle)token.Handle); typeSpec.DecodeSignature(new TokenResolverProvider(this, token.Module), this); specialTypeFound = true; } if (_compilationModuleGroup.VersionsWithType(type)) { // We don't need to store handles within the current compilation group // as we can read them directly from the ECMA objects. return; } if (type is EcmaType ecmaType) { // Don't store typespec tokens where a generic parameter resolves to the type in question if (token.TokenType == CorTokenType.mdtTypeDef || token.TokenType == CorTokenType.mdtTypeRef) { SetModuleTokenForTypeSystemEntity(_typeToRefTokens, ecmaType, token); } } else if (!specialTypeFound) { throw new NotImplementedException(type.ToString()); } } public int GetModuleIndex(EcmaModule module) { return _moduleIndexLookup(module); } /// <summary> /// As of 8/20/2018, recursive propagation of type information through /// the composite signature tree is not needed for anything. We're adding /// a dummy class to clearly indicate what aspects of the resolver need /// changing if the propagation becomes necessary. 
/// </summary> private class DummyTypeInfo { public static DummyTypeInfo Instance = new DummyTypeInfo(); } private class TokenResolverProvider : ISignatureTypeProvider<DummyTypeInfo, ModuleTokenResolver> { ModuleTokenResolver _resolver; EcmaModule _contextModule; public TokenResolverProvider(ModuleTokenResolver resolver, EcmaModule contextModule) { _resolver = resolver; _contextModule = contextModule; } public DummyTypeInfo GetArrayType(DummyTypeInfo elementType, ArrayShape shape) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetByReferenceType(DummyTypeInfo elementType) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetFunctionPointerType(MethodSignature<DummyTypeInfo> signature) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetGenericInstantiation(DummyTypeInfo genericType, ImmutableArray<DummyTypeInfo> typeArguments) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetGenericMethodParameter(ModuleTokenResolver genericContext, int index) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetGenericTypeParameter(ModuleTokenResolver genericContext, int index) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetModifiedType(DummyTypeInfo modifier, DummyTypeInfo unmodifiedType, bool isRequired) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetPinnedType(DummyTypeInfo elementType) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetPointerType(DummyTypeInfo elementType) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetPrimitiveType(PrimitiveTypeCode typeCode) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetSZArrayType(DummyTypeInfo elementType) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetTypeFromDefinition(MetadataReader reader, TypeDefinitionHandle handle, byte rawTypeKind) { // Type definition tokens outside of the versioning bubble are useless. return DummyTypeInfo.Instance; } public DummyTypeInfo GetTypeFromReference(MetadataReader reader, TypeReferenceHandle handle, byte rawTypeKind) { _resolver.AddModuleTokenForType((TypeDesc)_contextModule.GetObject(handle), new ModuleToken(_contextModule, handle)); return DummyTypeInfo.Instance; } public DummyTypeInfo GetTypeFromSpecification(MetadataReader reader, ModuleTokenResolver genericContext, TypeSpecificationHandle handle, byte rawTypeKind) { TypeSpecification typeSpec = reader.GetTypeSpecification(handle); typeSpec.DecodeSignature(this, genericContext); return DummyTypeInfo.Instance; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Concurrent; using System.Collections.Immutable; using System.Reflection.Metadata; using System.Reflection.Metadata.Ecma335; using Internal.JitInterface; using Internal.TypeSystem; using Internal.TypeSystem.Ecma; using Internal.CorConstants; namespace ILCompiler.DependencyAnalysis.ReadyToRun { /// <summary> /// This class is used to back-resolve typesystem elements from /// external version bubbles to references relative to the current /// versioning bubble. /// </summary> public class ModuleTokenResolver { /// <summary> /// Reverse lookup table mapping external types to reference tokens in the input modules. The table /// gets lazily initialized as various tokens are resolved in CorInfoImpl. /// </summary> private readonly ConcurrentDictionary<EcmaType, ModuleToken> _typeToRefTokens = new ConcurrentDictionary<EcmaType, ModuleToken>(); private readonly ConcurrentDictionary<FieldDesc, ModuleToken> _fieldToRefTokens = new ConcurrentDictionary<FieldDesc, ModuleToken>(); private readonly CompilationModuleGroup _compilationModuleGroup; private Func<EcmaModule, int> _moduleIndexLookup; public CompilerTypeSystemContext CompilerContext { get; } public ModuleTokenResolver(CompilationModuleGroup compilationModuleGroup, CompilerTypeSystemContext typeSystemContext) { _compilationModuleGroup = compilationModuleGroup; CompilerContext = typeSystemContext; } public void SetModuleIndexLookup(Func<EcmaModule, int> moduleIndexLookup) { _moduleIndexLookup = moduleIndexLookup; } public ModuleToken GetModuleTokenForType(EcmaType type, bool throwIfNotFound = true) { if (_compilationModuleGroup.VersionsWithType(type)) { return new ModuleToken(type.EcmaModule, (mdToken)MetadataTokens.GetToken(type.Handle)); } ModuleToken token; if (_typeToRefTokens.TryGetValue(type, out token)) { return token; } // If the token was not lazily mapped, search the input compilation set for a type reference token if (_compilationModuleGroup.TryGetModuleTokenForExternalType(type, out token)) { return token; } // Reverse lookup failed if (throwIfNotFound) { throw new NotImplementedException(type.ToString()); } else { return default(ModuleToken); } } public ModuleToken GetModuleTokenForMethod(MethodDesc method, bool throwIfNotFound = true) { method = method.GetCanonMethodTarget(CanonicalFormKind.Specific); if (_compilationModuleGroup.VersionsWithMethodBody(method) && method.GetTypicalMethodDefinition() is EcmaMethod ecmaMethod) { return new ModuleToken(ecmaMethod.Module, ecmaMethod.Handle); } // Reverse lookup failed if (throwIfNotFound) { throw new NotImplementedException(method.ToString()); } else { return default(ModuleToken); } } public ModuleToken GetModuleTokenForField(FieldDesc field, bool throwIfNotFound = true) { if (_compilationModuleGroup.VersionsWithType(field.OwningType) && field is EcmaField ecmaField) { return new ModuleToken(ecmaField.Module, ecmaField.Handle); } TypeDesc owningCanonType = field.OwningType.ConvertToCanonForm(CanonicalFormKind.Specific); FieldDesc canonField = field; if (owningCanonType != field.OwningType) { canonField = CompilerContext.GetFieldForInstantiatedType(field.GetTypicalFieldDefinition(), (InstantiatedType)owningCanonType); } ModuleToken token; if (_fieldToRefTokens.TryGetValue(canonField, out token)) { return token; } if (throwIfNotFound) { throw new NotImplementedException(field.ToString()); } else { return 
default(ModuleToken); } } public void AddModuleTokenForMethod(MethodDesc method, ModuleToken token) { if (token.TokenType == CorTokenType.mdtMethodSpec) { MethodSpecification methodSpec = token.MetadataReader.GetMethodSpecification((MethodSpecificationHandle)token.Handle); methodSpec.DecodeSignature<DummyTypeInfo, ModuleTokenResolver>(new TokenResolverProvider(this, token.Module), this); token = new ModuleToken(token.Module, methodSpec.Method); } if (token.TokenType == CorTokenType.mdtMemberRef) { MemberReference memberRef = token.MetadataReader.GetMemberReference((MemberReferenceHandle)token.Handle); EntityHandle owningTypeHandle = memberRef.Parent; TypeDesc owningType = (TypeDesc)token.Module.GetObject(owningTypeHandle, NotFoundBehavior.Throw); AddModuleTokenForType(owningType, new ModuleToken(token.Module, owningTypeHandle)); memberRef.DecodeMethodSignature<DummyTypeInfo, ModuleTokenResolver>(new TokenResolverProvider(this, token.Module), this); } if (token.TokenType == CorTokenType.mdtMethodDef) { MethodDefinition methodDef = token.MetadataReader.GetMethodDefinition((MethodDefinitionHandle)token.Handle); methodDef.DecodeSignature<DummyTypeInfo, ModuleTokenResolver>(new TokenResolverProvider(this, token.Module), this); } } private void AddModuleTokenForFieldReference(TypeDesc owningType, ModuleToken token) { MemberReference memberRef = token.MetadataReader.GetMemberReference((MemberReferenceHandle)token.Handle); EntityHandle owningTypeHandle = memberRef.Parent; AddModuleTokenForType(owningType, new ModuleToken(token.Module, owningTypeHandle)); memberRef.DecodeFieldSignature<DummyTypeInfo, ModuleTokenResolver>(new TokenResolverProvider(this, token.Module), this); } public void AddModuleTokenForField(FieldDesc field, ModuleToken token) { if (_compilationModuleGroup.VersionsWithType(field.OwningType) && field.OwningType is EcmaType) { // We don't need to store handles within the current compilation group // as we can read them directly from the ECMA objects. return; } TypeDesc owningCanonType = field.OwningType.ConvertToCanonForm(CanonicalFormKind.Specific); FieldDesc canonField = field; if (owningCanonType != field.OwningType) { canonField = CompilerContext.GetFieldForInstantiatedType(field.GetTypicalFieldDefinition(), (InstantiatedType)owningCanonType); } SetModuleTokenForTypeSystemEntity(_fieldToRefTokens, canonField, token); switch (token.TokenType) { case CorTokenType.mdtMemberRef: AddModuleTokenForFieldReference(owningCanonType, token); break; default: throw new NotImplementedException(); } } // Add TypeSystemEntity -> ModuleToken mapping to a ConcurrentDictionary. 
Using CompareTo sort the token used, so it will // be consistent in all runs of the compiler void SetModuleTokenForTypeSystemEntity<T>(ConcurrentDictionary<T, ModuleToken> dictionary, T tse, ModuleToken token) { if (!dictionary.TryAdd(tse, token)) { ModuleToken oldToken; do { // We will reach here, if the field already has a token if (!dictionary.TryGetValue(tse, out oldToken)) throw new InternalCompilerErrorException("TypeSystemEntity both present and not present in emission dictionary."); if (oldToken.CompareTo(token) <= 0) break; } while (dictionary.TryUpdate(tse, token, oldToken)); } } public void AddModuleTokenForType(TypeDesc type, ModuleToken token) { bool specialTypeFound = false; // Collect underlying type tokens for type specifications if (token.TokenType == CorTokenType.mdtTypeSpec) { TypeSpecification typeSpec = token.MetadataReader.GetTypeSpecification((TypeSpecificationHandle)token.Handle); typeSpec.DecodeSignature(new TokenResolverProvider(this, token.Module), this); specialTypeFound = true; } if (_compilationModuleGroup.VersionsWithType(type)) { // We don't need to store handles within the current compilation group // as we can read them directly from the ECMA objects. return; } if (type is EcmaType ecmaType) { // Don't store typespec tokens where a generic parameter resolves to the type in question if (token.TokenType == CorTokenType.mdtTypeDef || token.TokenType == CorTokenType.mdtTypeRef) { SetModuleTokenForTypeSystemEntity(_typeToRefTokens, ecmaType, token); } } else if (!specialTypeFound) { throw new NotImplementedException(type.ToString()); } } public int GetModuleIndex(EcmaModule module) { return _moduleIndexLookup(module); } /// <summary> /// As of 8/20/2018, recursive propagation of type information through /// the composite signature tree is not needed for anything. We're adding /// a dummy class to clearly indicate what aspects of the resolver need /// changing if the propagation becomes necessary. 
/// </summary> private class DummyTypeInfo { public static DummyTypeInfo Instance = new DummyTypeInfo(); } private class TokenResolverProvider : ISignatureTypeProvider<DummyTypeInfo, ModuleTokenResolver> { ModuleTokenResolver _resolver; EcmaModule _contextModule; public TokenResolverProvider(ModuleTokenResolver resolver, EcmaModule contextModule) { _resolver = resolver; _contextModule = contextModule; } public DummyTypeInfo GetArrayType(DummyTypeInfo elementType, ArrayShape shape) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetByReferenceType(DummyTypeInfo elementType) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetFunctionPointerType(MethodSignature<DummyTypeInfo> signature) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetGenericInstantiation(DummyTypeInfo genericType, ImmutableArray<DummyTypeInfo> typeArguments) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetGenericMethodParameter(ModuleTokenResolver genericContext, int index) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetGenericTypeParameter(ModuleTokenResolver genericContext, int index) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetModifiedType(DummyTypeInfo modifier, DummyTypeInfo unmodifiedType, bool isRequired) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetPinnedType(DummyTypeInfo elementType) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetPointerType(DummyTypeInfo elementType) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetPrimitiveType(PrimitiveTypeCode typeCode) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetSZArrayType(DummyTypeInfo elementType) { return DummyTypeInfo.Instance; } public DummyTypeInfo GetTypeFromDefinition(MetadataReader reader, TypeDefinitionHandle handle, byte rawTypeKind) { // Type definition tokens outside of the versioning bubble are useless. return DummyTypeInfo.Instance; } public DummyTypeInfo GetTypeFromReference(MetadataReader reader, TypeReferenceHandle handle, byte rawTypeKind) { _resolver.AddModuleTokenForType((TypeDesc)_contextModule.GetObject(handle), new ModuleToken(_contextModule, handle)); return DummyTypeInfo.Instance; } public DummyTypeInfo GetTypeFromSpecification(MetadataReader reader, ModuleTokenResolver genericContext, TypeSpecificationHandle handle, byte rawTypeKind) { TypeSpecification typeSpec = reader.GetTypeSpecification(handle); typeSpec.DecodeSignature(this, genericContext); return DummyTypeInfo.Instance; } } } }
-1
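The ModuleTokenResolver content in the record above keeps, per type-system entity, whichever ModuleToken compares lowest, using a TryAdd/TryGetValue/TryUpdate loop on a ConcurrentDictionary so the chosen token is the same in every run regardless of thread timing. A minimal standalone sketch of that pattern follows; it uses plain int keys and tokens in place of the real TypeSystemEntity and ModuleToken types, so the names here are illustrative rather than taken from the repository.

```csharp
// Minimal sketch of the "keep the smallest value" pattern used by
// SetModuleTokenForTypeSystemEntity, with plain ints standing in for
// the real TypeSystemEntity and ModuleToken types (illustrative only).
using System;
using System.Collections.Concurrent;

static class DeterministicTokenMap
{
    // Records `token` for `key`, keeping whichever token compares lowest
    // so the result is the same no matter which thread wins the race.
    public static void SetLowest(ConcurrentDictionary<int, int> map, int key, int token)
    {
        if (map.TryAdd(key, token))
            return; // first writer wins outright

        while (map.TryGetValue(key, out int oldToken))
        {
            if (oldToken.CompareTo(token) <= 0)
                return; // existing token is already the smaller one

            if (map.TryUpdate(key, token, oldToken))
                return; // swapped in the smaller token

            // another thread changed the value in between; re-read and retry
        }

        throw new InvalidOperationException("Key both present and not present.");
    }

    public static void Main()
    {
        var map = new ConcurrentDictionary<int, int>();
        SetLowest(map, key: 1, token: 0x06000042);
        SetLowest(map, key: 1, token: 0x06000007); // smaller token replaces the first
        Console.WriteLine($"0x{map[1]:X8}");       // prints 0x06000007
    }
}
```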
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/Common/src/Interop/Windows/Kernel32/Interop.CloseHandle.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; internal static partial class Interop { internal static partial class Kernel32 { [GeneratedDllImport(Libraries.Kernel32, SetLastError = true)] [return: MarshalAs(UnmanagedType.Bool)] internal static partial bool CloseHandle(IntPtr handle); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; internal static partial class Interop { internal static partial class Kernel32 { [GeneratedDllImport(Libraries.Kernel32, SetLastError = true)] [return: MarshalAs(UnmanagedType.Bool)] internal static partial bool CloseHandle(IntPtr handle); } }
-1
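The regex PR description repeated in these records contrasts backtracking constructs with atomic ones. A small hedged illustration of that difference using the public Regex API and an atomic group `(?>...)` — not the source generator's internal node tree — is sketched below.

```csharp
// Illustrative only: shows the backtracking vs. atomic behavior the PR
// description talks about, using ordinary pattern syntax rather than the
// source generator's internal node representation.
using System;
using System.Text.RegularExpressions;

class AtomicLoopDemo
{
    static void Main()
    {
        string input = new string('a', 30) + "!";

        // Greedy loop followed by a literal that can never match: the engine
        // retries every shorter length of "a+" before giving up.
        Console.WriteLine(Regex.IsMatch(input, @"^a+b$"));     // False, after backtracking

        // Atomic group: once "a+" has matched, its backtracking positions are
        // discarded, so failure is reported without re-trying shorter matches --
        // the effect the optimizer aims for when it marks a node atomic.
        Console.WriteLine(Regex.IsMatch(input, @"^(?>a+)b$")); // False, no backtracking
    }
}
```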
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/System.Security.Claims/ref/System.Security.Claims.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ------------------------------------------------------------------------------ // Changes to this file must follow the https://aka.ms/api-review process. // ------------------------------------------------------------------------------ namespace System.Security.Claims { public partial class Claim { public Claim(System.IO.BinaryReader reader) { } public Claim(System.IO.BinaryReader reader, System.Security.Claims.ClaimsIdentity? subject) { } protected Claim(System.Security.Claims.Claim other) { } protected Claim(System.Security.Claims.Claim other, System.Security.Claims.ClaimsIdentity? subject) { } public Claim(string type, string value) { } public Claim(string type, string value, string? valueType) { } public Claim(string type, string value, string? valueType, string? issuer) { } public Claim(string type, string value, string? valueType, string? issuer, string? originalIssuer) { } public Claim(string type, string value, string? valueType, string? issuer, string? originalIssuer, System.Security.Claims.ClaimsIdentity? subject) { } protected virtual byte[]? CustomSerializationData { get { throw null; } } public string Issuer { get { throw null; } } public string OriginalIssuer { get { throw null; } } public System.Collections.Generic.IDictionary<string, string> Properties { get { throw null; } } public System.Security.Claims.ClaimsIdentity? Subject { get { throw null; } } public string Type { get { throw null; } } public string Value { get { throw null; } } public string ValueType { get { throw null; } } public virtual System.Security.Claims.Claim Clone() { throw null; } public virtual System.Security.Claims.Claim Clone(System.Security.Claims.ClaimsIdentity? identity) { throw null; } public override string ToString() { throw null; } public virtual void WriteTo(System.IO.BinaryWriter writer) { } protected virtual void WriteTo(System.IO.BinaryWriter writer, byte[]? userData) { } } public partial class ClaimsIdentity : System.Security.Principal.IIdentity { public const string DefaultIssuer = "LOCAL AUTHORITY"; public const string DefaultNameClaimType = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name"; public const string DefaultRoleClaimType = "http://schemas.microsoft.com/ws/2008/06/identity/claims/role"; public ClaimsIdentity() { } public ClaimsIdentity(System.Collections.Generic.IEnumerable<System.Security.Claims.Claim>? claims) { } public ClaimsIdentity(System.Collections.Generic.IEnumerable<System.Security.Claims.Claim>? claims, string? authenticationType) { } public ClaimsIdentity(System.Collections.Generic.IEnumerable<System.Security.Claims.Claim>? claims, string? authenticationType, string? nameType, string? roleType) { } public ClaimsIdentity(System.IO.BinaryReader reader) { } protected ClaimsIdentity(System.Runtime.Serialization.SerializationInfo info) { } protected ClaimsIdentity(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } protected ClaimsIdentity(System.Security.Claims.ClaimsIdentity other) { } public ClaimsIdentity(System.Security.Principal.IIdentity? identity) { } public ClaimsIdentity(System.Security.Principal.IIdentity? identity, System.Collections.Generic.IEnumerable<System.Security.Claims.Claim>? claims) { } public ClaimsIdentity(System.Security.Principal.IIdentity? identity, System.Collections.Generic.IEnumerable<System.Security.Claims.Claim>? claims, string? 
authenticationType, string? nameType, string? roleType) { } public ClaimsIdentity(string? authenticationType) { } public ClaimsIdentity(string? authenticationType, string? nameType, string? roleType) { } public System.Security.Claims.ClaimsIdentity? Actor { get { throw null; } set { } } public virtual string? AuthenticationType { get { throw null; } } public object? BootstrapContext { get { throw null; } set { } } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> Claims { get { throw null; } } protected virtual byte[]? CustomSerializationData { get { throw null; } } public virtual bool IsAuthenticated { get { throw null; } } public string? Label { get { throw null; } set { } } public virtual string? Name { get { throw null; } } public string NameClaimType { get { throw null; } } public string RoleClaimType { get { throw null; } } public virtual void AddClaim(System.Security.Claims.Claim claim) { } public virtual void AddClaims(System.Collections.Generic.IEnumerable<System.Security.Claims.Claim?> claims) { } public virtual System.Security.Claims.ClaimsIdentity Clone() { throw null; } protected virtual System.Security.Claims.Claim CreateClaim(System.IO.BinaryReader reader) { throw null; } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> FindAll(System.Predicate<System.Security.Claims.Claim> match) { throw null; } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> FindAll(string type) { throw null; } public virtual System.Security.Claims.Claim? FindFirst(System.Predicate<System.Security.Claims.Claim> match) { throw null; } public virtual System.Security.Claims.Claim? FindFirst(string type) { throw null; } protected virtual void GetObjectData(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } public virtual bool HasClaim(System.Predicate<System.Security.Claims.Claim> match) { throw null; } public virtual bool HasClaim(string type, string value) { throw null; } public virtual void RemoveClaim(System.Security.Claims.Claim? claim) { } public virtual bool TryRemoveClaim(System.Security.Claims.Claim? claim) { throw null; } public virtual void WriteTo(System.IO.BinaryWriter writer) { } protected virtual void WriteTo(System.IO.BinaryWriter writer, byte[]? userData) { } } public partial class ClaimsPrincipal : System.Security.Principal.IPrincipal { public ClaimsPrincipal() { } public ClaimsPrincipal(System.Collections.Generic.IEnumerable<System.Security.Claims.ClaimsIdentity> identities) { } public ClaimsPrincipal(System.IO.BinaryReader reader) { } protected ClaimsPrincipal(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } public ClaimsPrincipal(System.Security.Principal.IIdentity identity) { } public ClaimsPrincipal(System.Security.Principal.IPrincipal principal) { } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> Claims { get { throw null; } } public static System.Func<System.Security.Claims.ClaimsPrincipal> ClaimsPrincipalSelector { get { throw null; } set { } } public static System.Security.Claims.ClaimsPrincipal? Current { get { throw null; } } protected virtual byte[]? CustomSerializationData { get { throw null; } } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.ClaimsIdentity> Identities { get { throw null; } } public virtual System.Security.Principal.IIdentity? 
Identity { get { throw null; } } public static System.Func<System.Collections.Generic.IEnumerable<System.Security.Claims.ClaimsIdentity>, System.Security.Claims.ClaimsIdentity?> PrimaryIdentitySelector { get { throw null; } set { } } public virtual void AddIdentities(System.Collections.Generic.IEnumerable<System.Security.Claims.ClaimsIdentity> identities) { } public virtual void AddIdentity(System.Security.Claims.ClaimsIdentity identity) { } public virtual System.Security.Claims.ClaimsPrincipal Clone() { throw null; } protected virtual System.Security.Claims.ClaimsIdentity CreateClaimsIdentity(System.IO.BinaryReader reader) { throw null; } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> FindAll(System.Predicate<System.Security.Claims.Claim> match) { throw null; } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> FindAll(string type) { throw null; } public virtual System.Security.Claims.Claim? FindFirst(System.Predicate<System.Security.Claims.Claim> match) { throw null; } public virtual System.Security.Claims.Claim? FindFirst(string type) { throw null; } protected virtual void GetObjectData(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } public virtual bool HasClaim(System.Predicate<System.Security.Claims.Claim> match) { throw null; } public virtual bool HasClaim(string type, string value) { throw null; } public virtual bool IsInRole(string role) { throw null; } public virtual void WriteTo(System.IO.BinaryWriter writer) { } protected virtual void WriteTo(System.IO.BinaryWriter writer, byte[]? userData) { } } public static partial class ClaimTypes { public const string Actor = "http://schemas.xmlsoap.org/ws/2009/09/identity/claims/actor"; public const string Anonymous = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/anonymous"; public const string Authentication = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/authentication"; public const string AuthenticationInstant = "http://schemas.microsoft.com/ws/2008/06/identity/claims/authenticationinstant"; public const string AuthenticationMethod = "http://schemas.microsoft.com/ws/2008/06/identity/claims/authenticationmethod"; public const string AuthorizationDecision = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/authorizationdecision"; public const string CookiePath = "http://schemas.microsoft.com/ws/2008/06/identity/claims/cookiepath"; public const string Country = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/country"; public const string DateOfBirth = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/dateofbirth"; public const string DenyOnlyPrimaryGroupSid = "http://schemas.microsoft.com/ws/2008/06/identity/claims/denyonlyprimarygroupsid"; public const string DenyOnlyPrimarySid = "http://schemas.microsoft.com/ws/2008/06/identity/claims/denyonlyprimarysid"; public const string DenyOnlySid = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/denyonlysid"; public const string DenyOnlyWindowsDeviceGroup = "http://schemas.microsoft.com/ws/2008/06/identity/claims/denyonlywindowsdevicegroup"; public const string Dns = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/dns"; public const string Dsa = "http://schemas.microsoft.com/ws/2008/06/identity/claims/dsa"; public const string Email = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress"; public const string Expiration = "http://schemas.microsoft.com/ws/2008/06/identity/claims/expiration"; public const 
string Expired = "http://schemas.microsoft.com/ws/2008/06/identity/claims/expired"; public const string Gender = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/gender"; public const string GivenName = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname"; public const string GroupSid = "http://schemas.microsoft.com/ws/2008/06/identity/claims/groupsid"; public const string Hash = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/hash"; public const string HomePhone = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/homephone"; public const string IsPersistent = "http://schemas.microsoft.com/ws/2008/06/identity/claims/ispersistent"; public const string Locality = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/locality"; public const string MobilePhone = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/mobilephone"; public const string Name = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name"; public const string NameIdentifier = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/nameidentifier"; public const string OtherPhone = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/otherphone"; public const string PostalCode = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/postalcode"; public const string PrimaryGroupSid = "http://schemas.microsoft.com/ws/2008/06/identity/claims/primarygroupsid"; public const string PrimarySid = "http://schemas.microsoft.com/ws/2008/06/identity/claims/primarysid"; public const string Role = "http://schemas.microsoft.com/ws/2008/06/identity/claims/role"; public const string Rsa = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/rsa"; public const string SerialNumber = "http://schemas.microsoft.com/ws/2008/06/identity/claims/serialnumber"; public const string Sid = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/sid"; public const string Spn = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/spn"; public const string StateOrProvince = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/stateorprovince"; public const string StreetAddress = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/streetaddress"; public const string Surname = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/surname"; public const string System = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/system"; public const string Thumbprint = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/thumbprint"; public const string Upn = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn"; public const string Uri = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/uri"; public const string UserData = "http://schemas.microsoft.com/ws/2008/06/identity/claims/userdata"; public const string Version = "http://schemas.microsoft.com/ws/2008/06/identity/claims/version"; public const string Webpage = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/webpage"; public const string WindowsAccountName = "http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsaccountname"; public const string WindowsDeviceClaim = "http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsdeviceclaim"; public const string WindowsDeviceGroup = "http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsdevicegroup"; public const string WindowsFqbnVersion = "http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsfqbnversion"; public const string WindowsSubAuthority = "http://schemas.microsoft.com/ws/2008/06/identity/claims/windowssubauthority"; public const string WindowsUserClaim = 
"http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsuserclaim"; public const string X500DistinguishedName = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/x500distinguishedname"; } public static partial class ClaimValueTypes { public const string Base64Binary = "http://www.w3.org/2001/XMLSchema#base64Binary"; public const string Base64Octet = "http://www.w3.org/2001/XMLSchema#base64Octet"; public const string Boolean = "http://www.w3.org/2001/XMLSchema#boolean"; public const string Date = "http://www.w3.org/2001/XMLSchema#date"; public const string DateTime = "http://www.w3.org/2001/XMLSchema#dateTime"; public const string DaytimeDuration = "http://www.w3.org/TR/2002/WD-xquery-operators-20020816#dayTimeDuration"; public const string DnsName = "http://schemas.xmlsoap.org/claims/dns"; public const string Double = "http://www.w3.org/2001/XMLSchema#double"; public const string DsaKeyValue = "http://www.w3.org/2000/09/xmldsig#DSAKeyValue"; public const string Email = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress"; public const string Fqbn = "http://www.w3.org/2001/XMLSchema#fqbn"; public const string HexBinary = "http://www.w3.org/2001/XMLSchema#hexBinary"; public const string Integer = "http://www.w3.org/2001/XMLSchema#integer"; public const string Integer32 = "http://www.w3.org/2001/XMLSchema#integer32"; public const string Integer64 = "http://www.w3.org/2001/XMLSchema#integer64"; public const string KeyInfo = "http://www.w3.org/2000/09/xmldsig#KeyInfo"; public const string Rfc822Name = "urn:oasis:names:tc:xacml:1.0:data-type:rfc822Name"; public const string Rsa = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/rsa"; public const string RsaKeyValue = "http://www.w3.org/2000/09/xmldsig#RSAKeyValue"; public const string Sid = "http://www.w3.org/2001/XMLSchema#sid"; public const string String = "http://www.w3.org/2001/XMLSchema#string"; public const string Time = "http://www.w3.org/2001/XMLSchema#time"; public const string UInteger32 = "http://www.w3.org/2001/XMLSchema#uinteger32"; public const string UInteger64 = "http://www.w3.org/2001/XMLSchema#uinteger64"; public const string UpnName = "http://schemas.xmlsoap.org/claims/UPN"; public const string X500Name = "urn:oasis:names:tc:xacml:1.0:data-type:x500Name"; public const string YearMonthDuration = "http://www.w3.org/TR/2002/WD-xquery-operators-20020816#yearMonthDuration"; } } namespace System.Security.Principal { public partial class GenericIdentity : System.Security.Claims.ClaimsIdentity { protected GenericIdentity(System.Security.Principal.GenericIdentity identity) { } public GenericIdentity(string name) { } public GenericIdentity(string name, string type) { } public override string AuthenticationType { get { throw null; } } public override System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> Claims { get { throw null; } } public override bool IsAuthenticated { get { throw null; } } public override string Name { get { throw null; } } public override System.Security.Claims.ClaimsIdentity Clone() { throw null; } } public partial class GenericPrincipal : System.Security.Claims.ClaimsPrincipal { public GenericPrincipal(System.Security.Principal.IIdentity identity, string[]? roles) { } public override System.Security.Principal.IIdentity Identity { get { throw null; } } public override bool IsInRole([System.Diagnostics.CodeAnalysis.NotNullWhenAttribute(true)] string? role) { throw null; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ------------------------------------------------------------------------------ // Changes to this file must follow the https://aka.ms/api-review process. // ------------------------------------------------------------------------------ namespace System.Security.Claims { public partial class Claim { public Claim(System.IO.BinaryReader reader) { } public Claim(System.IO.BinaryReader reader, System.Security.Claims.ClaimsIdentity? subject) { } protected Claim(System.Security.Claims.Claim other) { } protected Claim(System.Security.Claims.Claim other, System.Security.Claims.ClaimsIdentity? subject) { } public Claim(string type, string value) { } public Claim(string type, string value, string? valueType) { } public Claim(string type, string value, string? valueType, string? issuer) { } public Claim(string type, string value, string? valueType, string? issuer, string? originalIssuer) { } public Claim(string type, string value, string? valueType, string? issuer, string? originalIssuer, System.Security.Claims.ClaimsIdentity? subject) { } protected virtual byte[]? CustomSerializationData { get { throw null; } } public string Issuer { get { throw null; } } public string OriginalIssuer { get { throw null; } } public System.Collections.Generic.IDictionary<string, string> Properties { get { throw null; } } public System.Security.Claims.ClaimsIdentity? Subject { get { throw null; } } public string Type { get { throw null; } } public string Value { get { throw null; } } public string ValueType { get { throw null; } } public virtual System.Security.Claims.Claim Clone() { throw null; } public virtual System.Security.Claims.Claim Clone(System.Security.Claims.ClaimsIdentity? identity) { throw null; } public override string ToString() { throw null; } public virtual void WriteTo(System.IO.BinaryWriter writer) { } protected virtual void WriteTo(System.IO.BinaryWriter writer, byte[]? userData) { } } public partial class ClaimsIdentity : System.Security.Principal.IIdentity { public const string DefaultIssuer = "LOCAL AUTHORITY"; public const string DefaultNameClaimType = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name"; public const string DefaultRoleClaimType = "http://schemas.microsoft.com/ws/2008/06/identity/claims/role"; public ClaimsIdentity() { } public ClaimsIdentity(System.Collections.Generic.IEnumerable<System.Security.Claims.Claim>? claims) { } public ClaimsIdentity(System.Collections.Generic.IEnumerable<System.Security.Claims.Claim>? claims, string? authenticationType) { } public ClaimsIdentity(System.Collections.Generic.IEnumerable<System.Security.Claims.Claim>? claims, string? authenticationType, string? nameType, string? roleType) { } public ClaimsIdentity(System.IO.BinaryReader reader) { } protected ClaimsIdentity(System.Runtime.Serialization.SerializationInfo info) { } protected ClaimsIdentity(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } protected ClaimsIdentity(System.Security.Claims.ClaimsIdentity other) { } public ClaimsIdentity(System.Security.Principal.IIdentity? identity) { } public ClaimsIdentity(System.Security.Principal.IIdentity? identity, System.Collections.Generic.IEnumerable<System.Security.Claims.Claim>? claims) { } public ClaimsIdentity(System.Security.Principal.IIdentity? identity, System.Collections.Generic.IEnumerable<System.Security.Claims.Claim>? claims, string? 
authenticationType, string? nameType, string? roleType) { } public ClaimsIdentity(string? authenticationType) { } public ClaimsIdentity(string? authenticationType, string? nameType, string? roleType) { } public System.Security.Claims.ClaimsIdentity? Actor { get { throw null; } set { } } public virtual string? AuthenticationType { get { throw null; } } public object? BootstrapContext { get { throw null; } set { } } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> Claims { get { throw null; } } protected virtual byte[]? CustomSerializationData { get { throw null; } } public virtual bool IsAuthenticated { get { throw null; } } public string? Label { get { throw null; } set { } } public virtual string? Name { get { throw null; } } public string NameClaimType { get { throw null; } } public string RoleClaimType { get { throw null; } } public virtual void AddClaim(System.Security.Claims.Claim claim) { } public virtual void AddClaims(System.Collections.Generic.IEnumerable<System.Security.Claims.Claim?> claims) { } public virtual System.Security.Claims.ClaimsIdentity Clone() { throw null; } protected virtual System.Security.Claims.Claim CreateClaim(System.IO.BinaryReader reader) { throw null; } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> FindAll(System.Predicate<System.Security.Claims.Claim> match) { throw null; } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> FindAll(string type) { throw null; } public virtual System.Security.Claims.Claim? FindFirst(System.Predicate<System.Security.Claims.Claim> match) { throw null; } public virtual System.Security.Claims.Claim? FindFirst(string type) { throw null; } protected virtual void GetObjectData(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } public virtual bool HasClaim(System.Predicate<System.Security.Claims.Claim> match) { throw null; } public virtual bool HasClaim(string type, string value) { throw null; } public virtual void RemoveClaim(System.Security.Claims.Claim? claim) { } public virtual bool TryRemoveClaim(System.Security.Claims.Claim? claim) { throw null; } public virtual void WriteTo(System.IO.BinaryWriter writer) { } protected virtual void WriteTo(System.IO.BinaryWriter writer, byte[]? userData) { } } public partial class ClaimsPrincipal : System.Security.Principal.IPrincipal { public ClaimsPrincipal() { } public ClaimsPrincipal(System.Collections.Generic.IEnumerable<System.Security.Claims.ClaimsIdentity> identities) { } public ClaimsPrincipal(System.IO.BinaryReader reader) { } protected ClaimsPrincipal(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } public ClaimsPrincipal(System.Security.Principal.IIdentity identity) { } public ClaimsPrincipal(System.Security.Principal.IPrincipal principal) { } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> Claims { get { throw null; } } public static System.Func<System.Security.Claims.ClaimsPrincipal> ClaimsPrincipalSelector { get { throw null; } set { } } public static System.Security.Claims.ClaimsPrincipal? Current { get { throw null; } } protected virtual byte[]? CustomSerializationData { get { throw null; } } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.ClaimsIdentity> Identities { get { throw null; } } public virtual System.Security.Principal.IIdentity? 
Identity { get { throw null; } } public static System.Func<System.Collections.Generic.IEnumerable<System.Security.Claims.ClaimsIdentity>, System.Security.Claims.ClaimsIdentity?> PrimaryIdentitySelector { get { throw null; } set { } } public virtual void AddIdentities(System.Collections.Generic.IEnumerable<System.Security.Claims.ClaimsIdentity> identities) { } public virtual void AddIdentity(System.Security.Claims.ClaimsIdentity identity) { } public virtual System.Security.Claims.ClaimsPrincipal Clone() { throw null; } protected virtual System.Security.Claims.ClaimsIdentity CreateClaimsIdentity(System.IO.BinaryReader reader) { throw null; } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> FindAll(System.Predicate<System.Security.Claims.Claim> match) { throw null; } public virtual System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> FindAll(string type) { throw null; } public virtual System.Security.Claims.Claim? FindFirst(System.Predicate<System.Security.Claims.Claim> match) { throw null; } public virtual System.Security.Claims.Claim? FindFirst(string type) { throw null; } protected virtual void GetObjectData(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context) { } public virtual bool HasClaim(System.Predicate<System.Security.Claims.Claim> match) { throw null; } public virtual bool HasClaim(string type, string value) { throw null; } public virtual bool IsInRole(string role) { throw null; } public virtual void WriteTo(System.IO.BinaryWriter writer) { } protected virtual void WriteTo(System.IO.BinaryWriter writer, byte[]? userData) { } } public static partial class ClaimTypes { public const string Actor = "http://schemas.xmlsoap.org/ws/2009/09/identity/claims/actor"; public const string Anonymous = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/anonymous"; public const string Authentication = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/authentication"; public const string AuthenticationInstant = "http://schemas.microsoft.com/ws/2008/06/identity/claims/authenticationinstant"; public const string AuthenticationMethod = "http://schemas.microsoft.com/ws/2008/06/identity/claims/authenticationmethod"; public const string AuthorizationDecision = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/authorizationdecision"; public const string CookiePath = "http://schemas.microsoft.com/ws/2008/06/identity/claims/cookiepath"; public const string Country = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/country"; public const string DateOfBirth = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/dateofbirth"; public const string DenyOnlyPrimaryGroupSid = "http://schemas.microsoft.com/ws/2008/06/identity/claims/denyonlyprimarygroupsid"; public const string DenyOnlyPrimarySid = "http://schemas.microsoft.com/ws/2008/06/identity/claims/denyonlyprimarysid"; public const string DenyOnlySid = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/denyonlysid"; public const string DenyOnlyWindowsDeviceGroup = "http://schemas.microsoft.com/ws/2008/06/identity/claims/denyonlywindowsdevicegroup"; public const string Dns = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/dns"; public const string Dsa = "http://schemas.microsoft.com/ws/2008/06/identity/claims/dsa"; public const string Email = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress"; public const string Expiration = "http://schemas.microsoft.com/ws/2008/06/identity/claims/expiration"; public const 
string Expired = "http://schemas.microsoft.com/ws/2008/06/identity/claims/expired"; public const string Gender = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/gender"; public const string GivenName = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname"; public const string GroupSid = "http://schemas.microsoft.com/ws/2008/06/identity/claims/groupsid"; public const string Hash = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/hash"; public const string HomePhone = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/homephone"; public const string IsPersistent = "http://schemas.microsoft.com/ws/2008/06/identity/claims/ispersistent"; public const string Locality = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/locality"; public const string MobilePhone = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/mobilephone"; public const string Name = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name"; public const string NameIdentifier = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/nameidentifier"; public const string OtherPhone = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/otherphone"; public const string PostalCode = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/postalcode"; public const string PrimaryGroupSid = "http://schemas.microsoft.com/ws/2008/06/identity/claims/primarygroupsid"; public const string PrimarySid = "http://schemas.microsoft.com/ws/2008/06/identity/claims/primarysid"; public const string Role = "http://schemas.microsoft.com/ws/2008/06/identity/claims/role"; public const string Rsa = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/rsa"; public const string SerialNumber = "http://schemas.microsoft.com/ws/2008/06/identity/claims/serialnumber"; public const string Sid = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/sid"; public const string Spn = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/spn"; public const string StateOrProvince = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/stateorprovince"; public const string StreetAddress = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/streetaddress"; public const string Surname = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/surname"; public const string System = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/system"; public const string Thumbprint = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/thumbprint"; public const string Upn = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn"; public const string Uri = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/uri"; public const string UserData = "http://schemas.microsoft.com/ws/2008/06/identity/claims/userdata"; public const string Version = "http://schemas.microsoft.com/ws/2008/06/identity/claims/version"; public const string Webpage = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/webpage"; public const string WindowsAccountName = "http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsaccountname"; public const string WindowsDeviceClaim = "http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsdeviceclaim"; public const string WindowsDeviceGroup = "http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsdevicegroup"; public const string WindowsFqbnVersion = "http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsfqbnversion"; public const string WindowsSubAuthority = "http://schemas.microsoft.com/ws/2008/06/identity/claims/windowssubauthority"; public const string WindowsUserClaim = 
"http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsuserclaim"; public const string X500DistinguishedName = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/x500distinguishedname"; } public static partial class ClaimValueTypes { public const string Base64Binary = "http://www.w3.org/2001/XMLSchema#base64Binary"; public const string Base64Octet = "http://www.w3.org/2001/XMLSchema#base64Octet"; public const string Boolean = "http://www.w3.org/2001/XMLSchema#boolean"; public const string Date = "http://www.w3.org/2001/XMLSchema#date"; public const string DateTime = "http://www.w3.org/2001/XMLSchema#dateTime"; public const string DaytimeDuration = "http://www.w3.org/TR/2002/WD-xquery-operators-20020816#dayTimeDuration"; public const string DnsName = "http://schemas.xmlsoap.org/claims/dns"; public const string Double = "http://www.w3.org/2001/XMLSchema#double"; public const string DsaKeyValue = "http://www.w3.org/2000/09/xmldsig#DSAKeyValue"; public const string Email = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress"; public const string Fqbn = "http://www.w3.org/2001/XMLSchema#fqbn"; public const string HexBinary = "http://www.w3.org/2001/XMLSchema#hexBinary"; public const string Integer = "http://www.w3.org/2001/XMLSchema#integer"; public const string Integer32 = "http://www.w3.org/2001/XMLSchema#integer32"; public const string Integer64 = "http://www.w3.org/2001/XMLSchema#integer64"; public const string KeyInfo = "http://www.w3.org/2000/09/xmldsig#KeyInfo"; public const string Rfc822Name = "urn:oasis:names:tc:xacml:1.0:data-type:rfc822Name"; public const string Rsa = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/rsa"; public const string RsaKeyValue = "http://www.w3.org/2000/09/xmldsig#RSAKeyValue"; public const string Sid = "http://www.w3.org/2001/XMLSchema#sid"; public const string String = "http://www.w3.org/2001/XMLSchema#string"; public const string Time = "http://www.w3.org/2001/XMLSchema#time"; public const string UInteger32 = "http://www.w3.org/2001/XMLSchema#uinteger32"; public const string UInteger64 = "http://www.w3.org/2001/XMLSchema#uinteger64"; public const string UpnName = "http://schemas.xmlsoap.org/claims/UPN"; public const string X500Name = "urn:oasis:names:tc:xacml:1.0:data-type:x500Name"; public const string YearMonthDuration = "http://www.w3.org/TR/2002/WD-xquery-operators-20020816#yearMonthDuration"; } } namespace System.Security.Principal { public partial class GenericIdentity : System.Security.Claims.ClaimsIdentity { protected GenericIdentity(System.Security.Principal.GenericIdentity identity) { } public GenericIdentity(string name) { } public GenericIdentity(string name, string type) { } public override string AuthenticationType { get { throw null; } } public override System.Collections.Generic.IEnumerable<System.Security.Claims.Claim> Claims { get { throw null; } } public override bool IsAuthenticated { get { throw null; } } public override string Name { get { throw null; } } public override System.Security.Claims.ClaimsIdentity Clone() { throw null; } } public partial class GenericPrincipal : System.Security.Claims.ClaimsPrincipal { public GenericPrincipal(System.Security.Principal.IIdentity identity, string[]? roles) { } public override System.Security.Principal.IIdentity Identity { get { throw null; } } public override bool IsInRole([System.Diagnostics.CodeAnalysis.NotNullWhenAttribute(true)] string? role) { throw null; } } }
-1
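The System.Security.Claims record above is a reference-assembly surface listing rather than an implementation. For orientation, a brief hedged sketch of how those types are typically combined at runtime follows; the claim values and role names are made up for illustration.

```csharp
// Small usage sketch for the System.Security.Claims surface listed above;
// the claim values and role names are made-up examples.
using System;
using System.Security.Claims;

class ClaimsDemo
{
    static void Main()
    {
        var claims = new[]
        {
            new Claim(ClaimTypes.Name, "jane"),
            new Claim(ClaimTypes.Role, "admin"),
        };

        // NameClaimType/RoleClaimType default to ClaimTypes.Name / ClaimTypes.Role,
        // so Name and IsInRole resolve against the claims above.
        var identity = new ClaimsIdentity(claims, authenticationType: "TestAuth");
        var principal = new ClaimsPrincipal(identity);

        Console.WriteLine(principal.Identity?.Name);                    // jane
        Console.WriteLine(principal.IsInRole("admin"));                 // True
        Console.WriteLine(identity.HasClaim(ClaimTypes.Role, "admin")); // True
    }
}
```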
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/installer/tests/Assets/TestProjects/StartupHookWithReturnType/StartupHookWithReturnType.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; internal class StartupHook { public static int Initialize() { // This hook should not be called because it doesn't have a // void return type. Instead, the startup hook provider code // should throw an exception. Console.WriteLine("Hello from startup hook returning int!"); return 10; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; internal class StartupHook { public static int Initialize() { // This hook should not be called because it doesn't have a // void return type. Instead, the startup hook provider code // should throw an exception. Console.WriteLine("Hello from startup hook returning int!"); return 10; } }
-1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
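The second fix can be pictured with a similarly hypothetical node model (TreeNode, TreeNodeKind and EliminateEndingBacktracking below are assumptions for illustration, not the real RegexNode API): the pass that walks the trailing path of an atomic construct, marking whatever ends it as atomic, now also descends into lookaround bodies instead of stopping at them.

using System.Collections.Generic;

enum TreeNodeKind { OneLoop, Concatenate, Lookaround }

sealed class TreeNode
{
    public TreeNodeKind Kind;
    public List<TreeNode> Children = new();
    public bool Atomic;
}

static class EndingBacktrackingPass
{
    // Walks the chain of nodes that can end an atomic region and marks them atomic too.
    public static void EliminateEndingBacktracking(TreeNode node)
    {
        while (true)
        {
            switch (node.Kind)
            {
                case TreeNodeKind.OneLoop:
                    node.Atomic = true;            // nothing after it can make the loop give back
                    return;

                case TreeNodeKind.Concatenate:
                    if (node.Children.Count == 0) return;
                    node = node.Children[^1];      // only the last child ends the region
                    continue;

                case TreeNodeKind.Lookaround:
                    if (node.Children.Count == 0) return;
                    node = node.Children[0];       // the fix: also recur into lookaround bodies
                    continue;

                default:
                    return;
            }
        }
    }
}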
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/System.Runtime/tests/System/Reflection/MethodBodyTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Reflection; using Xunit; #pragma warning disable 0219 // field is never used namespace System.Reflection.Tests { public static class MethodBodyTests { [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsMethodBodySupported))] public static void Test_MethodBody_ExceptionHandlingClause() { MethodInfo mi = typeof(MethodBodyTests).GetMethod("MethodBodyExample", BindingFlags.NonPublic | BindingFlags.Static); MethodBody mb = mi.GetMethodBody(); Assert.True(mb.InitLocals); // local variables are initialized #if DEBUG Assert.Equal(2, mb.MaxStackSize); Assert.Equal(5, mb.LocalVariables.Count); foreach (LocalVariableInfo lvi in mb.LocalVariables) { if (lvi.LocalIndex == 0) { Assert.Equal(typeof(int), lvi.LocalType); } if (lvi.LocalIndex == 1) { Assert.Equal(typeof(string), lvi.LocalType); } if (lvi.LocalIndex == 2) { Assert.Equal(typeof(bool), lvi.LocalType); } if (lvi.LocalIndex == 3) { Assert.Equal(typeof(bool), lvi.LocalType); } if (lvi.LocalIndex == 4) { Assert.Equal(typeof(Exception), lvi.LocalType); } } foreach (ExceptionHandlingClause ehc in mb.ExceptionHandlingClauses) { if (ehc.Flags != ExceptionHandlingClauseOptions.Finally && ehc.Flags != ExceptionHandlingClauseOptions.Filter) { Assert.Equal(typeof(Exception), ehc.CatchType); Assert.Equal(19, ehc.HandlerLength); Assert.Equal(70, ehc.HandlerOffset); Assert.Equal(61, ehc.TryLength); Assert.Equal(9, ehc.TryOffset); return; } } #else Assert.Equal(2, mb.MaxStackSize); Assert.Equal(3, mb.LocalVariables.Count); foreach (LocalVariableInfo lvi in mb.LocalVariables) { if (lvi.LocalIndex == 0) { Assert.Equal(typeof(int), lvi.LocalType); } if (lvi.LocalIndex == 1) { Assert.Equal(typeof(string), lvi.LocalType); } if (lvi.LocalIndex == 2) { Assert.Equal(typeof(Exception), lvi.LocalType); } } foreach (ExceptionHandlingClause ehc in mb.ExceptionHandlingClauses) { if (ehc.Flags != ExceptionHandlingClauseOptions.Finally && ehc.Flags != ExceptionHandlingClauseOptions.Filter) { Assert.Equal(typeof(Exception), ehc.CatchType); Assert.Equal(14, ehc.HandlerLength); Assert.Equal(58, ehc.HandlerOffset); Assert.Equal(50, ehc.TryLength); Assert.Equal(8, ehc.TryOffset); return; } } #endif Assert.True(false, "Expected to find CatchType clause."); } private static void MethodBodyExample(object arg) { int var1 = 2; string var2 = "I am a string"; try { if (arg == null) { throw new ArgumentNullException("Input argument cannot be null."); } if (arg.GetType() == typeof(string)) { throw new ArgumentException("Input argument cannot be a string."); } } catch (Exception ex) { Console.WriteLine(ex.Message); } finally { var1 = 3; var2 = "I am a new string!"; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Reflection; using Xunit; #pragma warning disable 0219 // field is never used namespace System.Reflection.Tests { public static class MethodBodyTests { [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsMethodBodySupported))] public static void Test_MethodBody_ExceptionHandlingClause() { MethodInfo mi = typeof(MethodBodyTests).GetMethod("MethodBodyExample", BindingFlags.NonPublic | BindingFlags.Static); MethodBody mb = mi.GetMethodBody(); Assert.True(mb.InitLocals); // local variables are initialized #if DEBUG Assert.Equal(2, mb.MaxStackSize); Assert.Equal(5, mb.LocalVariables.Count); foreach (LocalVariableInfo lvi in mb.LocalVariables) { if (lvi.LocalIndex == 0) { Assert.Equal(typeof(int), lvi.LocalType); } if (lvi.LocalIndex == 1) { Assert.Equal(typeof(string), lvi.LocalType); } if (lvi.LocalIndex == 2) { Assert.Equal(typeof(bool), lvi.LocalType); } if (lvi.LocalIndex == 3) { Assert.Equal(typeof(bool), lvi.LocalType); } if (lvi.LocalIndex == 4) { Assert.Equal(typeof(Exception), lvi.LocalType); } } foreach (ExceptionHandlingClause ehc in mb.ExceptionHandlingClauses) { if (ehc.Flags != ExceptionHandlingClauseOptions.Finally && ehc.Flags != ExceptionHandlingClauseOptions.Filter) { Assert.Equal(typeof(Exception), ehc.CatchType); Assert.Equal(19, ehc.HandlerLength); Assert.Equal(70, ehc.HandlerOffset); Assert.Equal(61, ehc.TryLength); Assert.Equal(9, ehc.TryOffset); return; } } #else Assert.Equal(2, mb.MaxStackSize); Assert.Equal(3, mb.LocalVariables.Count); foreach (LocalVariableInfo lvi in mb.LocalVariables) { if (lvi.LocalIndex == 0) { Assert.Equal(typeof(int), lvi.LocalType); } if (lvi.LocalIndex == 1) { Assert.Equal(typeof(string), lvi.LocalType); } if (lvi.LocalIndex == 2) { Assert.Equal(typeof(Exception), lvi.LocalType); } } foreach (ExceptionHandlingClause ehc in mb.ExceptionHandlingClauses) { if (ehc.Flags != ExceptionHandlingClauseOptions.Finally && ehc.Flags != ExceptionHandlingClauseOptions.Filter) { Assert.Equal(typeof(Exception), ehc.CatchType); Assert.Equal(14, ehc.HandlerLength); Assert.Equal(58, ehc.HandlerOffset); Assert.Equal(50, ehc.TryLength); Assert.Equal(8, ehc.TryOffset); return; } } #endif Assert.True(false, "Expected to find CatchType clause."); } private static void MethodBodyExample(object arg) { int var1 = 2; string var2 = "I am a string"; try { if (arg == null) { throw new ArgumentNullException("Input argument cannot be null."); } if (arg.GetType() == typeof(string)) { throw new ArgumentException("Input argument cannot be a string."); } } catch (Exception ex) { Console.WriteLine(ex.Message); } finally { var1 = 3; var2 = "I am a new string!"; } } } }
-1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
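For context on the analysis both fixes lean on, a rough sketch of computing a per-node "may backtrack" answer in one post-order pass over the final tree follows (again with made-up types; the real computation considers many more node kinds): parents and children then consult the same map, which is the consistency the description above is about.

using System.Collections.Generic;

sealed class RNode
{
    public bool Atomic;                  // marked atomic by earlier optimizations
    public bool IntroducesBacktracking;  // e.g. a greedy loop left non-atomic
    public List<RNode> Children = new();
}

static class BacktrackingAnalysis
{
    // One bottom-up pass: a node may backtrack if it introduces backtracking itself or any
    // child may, unless it has been marked atomic (which hides backtracking from its parent).
    public static Dictionary<RNode, bool> ComputeMayBacktrack(RNode root)
    {
        var map = new Dictionary<RNode, bool>();
        Visit(root);
        return map;

        bool Visit(RNode n)
        {
            bool childMay = false;
            foreach (RNode c in n.Children)
            {
                childMay |= Visit(c);
            }

            bool may = !n.Atomic && (n.IntroducesBacktracking || childMay);
            map[n] = may;
            return may;
        }
    }
}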
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/tests/JIT/HardwareIntrinsics/General/Vector256_1/op_UnaryPlus.Double.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void op_UnaryPlusDouble() { var test = new VectorUnaryOpTest__op_UnaryPlusDouble(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorUnaryOpTest__op_UnaryPlusDouble { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Double[] inArray1, Double[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Double>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Double>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Double, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector256<Double> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Double>, byte>(ref testStruct._fld1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Double>>()); return testStruct; } public void 
RunStructFldScenario(VectorUnaryOpTest__op_UnaryPlusDouble testClass) { var result = +_fld1; Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Double>>() / sizeof(Double); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Double>>() / sizeof(Double); private static Double[] _data1 = new Double[Op1ElementCount]; private static Vector256<Double> _clsVar1; private Vector256<Double> _fld1; private DataTable _dataTable; static VectorUnaryOpTest__op_UnaryPlusDouble() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Double>, byte>(ref _clsVar1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Double>>()); } public VectorUnaryOpTest__op_UnaryPlusDouble() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Double>, byte>(ref _fld1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Double>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } _dataTable = new DataTable(_data1, new Double[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = +Unsafe.Read<Vector256<Double>>(_dataTable.inArray1Ptr); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Vector256<Double>).GetMethod("op_UnaryPlus", new Type[] { typeof(Vector256<Double>) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<Double>>(_dataTable.inArray1Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Double>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = +_clsVar1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector256<Double>>(_dataTable.inArray1Ptr); var result = +op1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorUnaryOpTest__op_UnaryPlusDouble(); var result = +test._fld1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = +_fld1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = +test._fld1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, 
_dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector256<Double> op1, void* result, [CallerMemberName] string method = "") { Double[] inArray1 = new Double[Op1ElementCount]; Double[] outArray = new Double[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Double, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Double>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Double[] inArray1 = new Double[Op1ElementCount]; Double[] outArray = new Double[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<Double>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Double>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(Double[] firstOp, Double[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (double)(+firstOp[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (double)(+firstOp[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.op_UnaryPlus<Double>(Vector256<Double>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void op_UnaryPlusDouble() { var test = new VectorUnaryOpTest__op_UnaryPlusDouble(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorUnaryOpTest__op_UnaryPlusDouble { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Double[] inArray1, Double[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Double>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Double>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Double, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector256<Double> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Double>, byte>(ref testStruct._fld1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Double>>()); return testStruct; } public void 
RunStructFldScenario(VectorUnaryOpTest__op_UnaryPlusDouble testClass) { var result = +_fld1; Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Double>>() / sizeof(Double); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Double>>() / sizeof(Double); private static Double[] _data1 = new Double[Op1ElementCount]; private static Vector256<Double> _clsVar1; private Vector256<Double> _fld1; private DataTable _dataTable; static VectorUnaryOpTest__op_UnaryPlusDouble() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Double>, byte>(ref _clsVar1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Double>>()); } public VectorUnaryOpTest__op_UnaryPlusDouble() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Double>, byte>(ref _fld1), ref Unsafe.As<Double, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Double>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); } _dataTable = new DataTable(_data1, new Double[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = +Unsafe.Read<Vector256<Double>>(_dataTable.inArray1Ptr); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Vector256<Double>).GetMethod("op_UnaryPlus", new Type[] { typeof(Vector256<Double>) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<Double>>(_dataTable.inArray1Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Double>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = +_clsVar1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector256<Double>>(_dataTable.inArray1Ptr); var result = +op1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorUnaryOpTest__op_UnaryPlusDouble(); var result = +test._fld1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = +_fld1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = +test._fld1; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, 
_dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector256<Double> op1, void* result, [CallerMemberName] string method = "") { Double[] inArray1 = new Double[Op1ElementCount]; Double[] outArray = new Double[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Double, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Double>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Double[] inArray1 = new Double[Op1ElementCount]; Double[] outArray = new Double[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<Double>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Double>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(Double[] firstOp, Double[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (double)(+firstOp[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (double)(+firstOp[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.op_UnaryPlus<Double>(Vector256<Double>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/LCIDConversionAttribute.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Runtime.InteropServices { [AttributeUsage(AttributeTargets.Method, Inherited = false)] public sealed class LCIDConversionAttribute : Attribute { public LCIDConversionAttribute(int lcid) { Value = lcid; } public int Value { get; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Runtime.InteropServices { [AttributeUsage(AttributeTargets.Method, Inherited = false)] public sealed class LCIDConversionAttribute : Attribute { public LCIDConversionAttribute(int lcid) { Value = lcid; } public int Value { get; } } }
-1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/tests/JIT/HardwareIntrinsics/General/Vector128/EqualsAll.Int32.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void EqualsAllInt32() { var test = new VectorBooleanBinaryOpTest__EqualsAllInt32(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBooleanBinaryOpTest__EqualsAllInt32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private GCHandle inHandle1; private GCHandle inHandle2; private ulong alignment; public DataTable(Int32[] inArray1, Int32[] inArray2, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int32> _fld1; public Vector128<Int32> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, 
byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); return testStruct; } public void RunStructFldScenario(VectorBooleanBinaryOpTest__EqualsAllInt32 testClass) { var result = Vector128.EqualsAll(_fld1, _fld2); testClass.ValidateResult(_fld1, _fld2, result); } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static Int32[] _data1 = new Int32[Op1ElementCount]; private static Int32[] _data2 = new Int32[Op2ElementCount]; private static Vector128<Int32> _clsVar1; private static Vector128<Int32> _clsVar2; private Vector128<Int32> _fld1; private Vector128<Int32> _fld2; private DataTable _dataTable; static VectorBooleanBinaryOpTest__EqualsAllInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); } public VectorBooleanBinaryOpTest__EqualsAllInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, _data2, LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector128.EqualsAll( Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector128).GetMethod(nameof(Vector128.EqualsAll), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>) }); if (method is null) { method = typeof(Vector128).GetMethod(nameof(Vector128.EqualsAll), 1, new Type[] { typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if (method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(Int32)); } var result = method.Invoke(null, 
new object[] { Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector128.EqualsAll( _clsVar1, _clsVar2 ); ValidateResult(_clsVar1, _clsVar2, result); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr); var result = Vector128.EqualsAll(op1, op2); ValidateResult(op1, op2, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBooleanBinaryOpTest__EqualsAllInt32(); var result = Vector128.EqualsAll(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector128.EqualsAll(_fld1, _fld2); ValidateResult(_fld1, _fld2, result); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector128.EqualsAll(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector128<Int32> op1, Vector128<Int32> op2, bool result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int32>>()); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(Int32[] left, Int32[] right, bool result, [CallerMemberName] string method = "") { bool succeeded = true; var expectedResult = true; for (var i = 0; i < Op1ElementCount; i++) { expectedResult &= (left[i] == right[i]); } succeeded = (expectedResult == result); if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector128)}.{nameof(Vector128.EqualsAll)}<Int32>(Vector128<Int32>, Vector128<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({result})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void EqualsAllInt32() { var test = new VectorBooleanBinaryOpTest__EqualsAllInt32(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBooleanBinaryOpTest__EqualsAllInt32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private GCHandle inHandle1; private GCHandle inHandle2; private ulong alignment; public DataTable(Int32[] inArray1, Int32[] inArray2, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int32> _fld1; public Vector128<Int32> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, 
byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); return testStruct; } public void RunStructFldScenario(VectorBooleanBinaryOpTest__EqualsAllInt32 testClass) { var result = Vector128.EqualsAll(_fld1, _fld2); testClass.ValidateResult(_fld1, _fld2, result); } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static Int32[] _data1 = new Int32[Op1ElementCount]; private static Int32[] _data2 = new Int32[Op2ElementCount]; private static Vector128<Int32> _clsVar1; private static Vector128<Int32> _clsVar2; private Vector128<Int32> _fld1; private Vector128<Int32> _fld2; private DataTable _dataTable; static VectorBooleanBinaryOpTest__EqualsAllInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); } public VectorBooleanBinaryOpTest__EqualsAllInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, _data2, LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector128.EqualsAll( Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector128).GetMethod(nameof(Vector128.EqualsAll), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>) }); if (method is null) { method = typeof(Vector128).GetMethod(nameof(Vector128.EqualsAll), 1, new Type[] { typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if (method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(Int32)); } var result = method.Invoke(null, 
new object[] { Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr) }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result)); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector128.EqualsAll( _clsVar1, _clsVar2 ); ValidateResult(_clsVar1, _clsVar2, result); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr); var result = Vector128.EqualsAll(op1, op2); ValidateResult(op1, op2, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBooleanBinaryOpTest__EqualsAllInt32(); var result = Vector128.EqualsAll(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector128.EqualsAll(_fld1, _fld2); ValidateResult(_fld1, _fld2, result); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector128.EqualsAll(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector128<Int32> op1, Vector128<Int32> op2, bool result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int32>>()); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(Int32[] left, Int32[] right, bool result, [CallerMemberName] string method = "") { bool succeeded = true; var expectedResult = true; for (var i = 0; i < Op1ElementCount; i++) { expectedResult &= (left[i] == right[i]); } succeeded = (expectedResult == result); if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector128)}.{nameof(Vector128.EqualsAll)}<Int32>(Vector128<Int32>, Vector128<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({result})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Later, when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, since doing so lets them avoid unnecessary backtracking-related code generation when the child is known not to backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler's / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, a late optimization can make a transformation that would then enable a node to be made atomic, but we don't run that phase of the optimizer again, so the node is left non-atomic. The source generator then does its analysis and concludes that the node should be treated as atomic. That leads to problems: the node itself has unnecessary backtracking code generated, but the parent rightly assumes there wasn't any and doesn't generate the code necessary to compensate for it, or alternatively generates code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer-loop tests, which source-generate our full regex corpus, caught a case where this was happening. This change makes a couple of fixes, either of which on its own is sufficient to address this particular case, and each of which also brings other benefits: 1. When rendering a single-char loop, the source generator consults the computed atomicity table to determine whether the rest of the source generation views the loop as atomic; if it does, it emits an atomic rendering instead. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also atomic), we now also recur into lookarounds. This change also removes some duplicated code for reducing lookarounds and renames some stale methods.
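To make the atomicity reasoning above concrete, here is a minimal, hypothetical C# sketch (not taken from the PR; the pattern and inputs are purely illustrative). It shows why a single-char loop such as the a+ in a+b can safely be treated as atomic: no character the loop gives back can ever help the trailing b match, so wrapping the loop in an explicit atomic group (?>...) does not change any result, whereas a loop whose giveback the tail actually needs (a+ followed by ab) must not be marked atomic. This is the property the tree optimizer's atomicity annotations and the compiler/source generator's analysis both have to agree on.

using System;
using System.Text.RegularExpressions;

class AtomicLoopDemo
{
    static void Main()
    {
        // "a+b": the greedy single-char loop a+ could backtrack in general,
        // but no 'a' it gives back can start a match of 'b', so treating the
        // loop as atomic cannot change the outcome.
        var backtracking = new Regex("a+b");
        var atomic = new Regex("(?>a+)b"); // explicit atomic group: no giveback

        foreach (string input in new[] { "aaab", "ab", "aaa", "b", "aaabab" })
        {
            bool b1 = backtracking.IsMatch(input);
            bool b2 = atomic.IsMatch(input);
            Console.WriteLine($"{input,-7} backtracking={b1} atomic={b2} agree={b1 == b2}");
        }

        // Contrast: in "a+ab" the loop must give back one 'a' for the tail
        // to match, so this loop is NOT safe to treat as atomic.
        Console.WriteLine(new Regex("a+ab").IsMatch("aaab"));     // True
        Console.WriteLine(new Regex("(?>a+)ab").IsMatch("aaab")); // False
    }
}

If the optimizer and the source generator disagree about which of these two cases a given loop falls into -- one emitting backtracking state while the other assumes none exists -- the generated matcher can misbehave or fail to compile, which is the mismatch the fixes above close.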
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Later, when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, since doing so lets them avoid unnecessary backtracking-related code generation when the child is known not to backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler's / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, a late optimization can make a transformation that would then enable a node to be made atomic, but we don't run that phase of the optimizer again, so the node is left non-atomic. The source generator then does its analysis and concludes that the node should be treated as atomic. That leads to problems: the node itself has unnecessary backtracking code generated, but the parent rightly assumes there wasn't any and doesn't generate the code necessary to compensate for it, or alternatively generates code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer-loop tests, which source-generate our full regex corpus, caught a case where this was happening. This change makes a couple of fixes, either of which on its own is sufficient to address this particular case, and each of which also brings other benefits: 1. When rendering a single-char loop, the source generator consults the computed atomicity table to determine whether the rest of the source generation views the loop as atomic; if it does, it emits an atomic rendering instead. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also atomic), we now also recur into lookarounds. This change also removes some duplicated code for reducing lookarounds and renames some stale methods.
./src/libraries/System.Text.Json/tests/System.Text.Json.Tests/Serialization/NumberHandlingTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Concurrent; using System.Collections.Generic; using System.Collections.Immutable; using System.Collections.ObjectModel; using System.Globalization; using System.IO; using System.Linq; using System.Text.Encodings.Web; using System.Text.Json.Tests; using System.Threading.Tasks; using Xunit; namespace System.Text.Json.Serialization.Tests { public static partial class NumberHandlingTests { private static readonly JsonSerializerOptions s_optionReadFromStr = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.AllowReadingFromString }; private static readonly JsonSerializerOptions s_optionWriteAsStr = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.WriteAsString }; private static readonly JsonSerializerOptions s_optionReadAndWriteFromStr = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString }; private static readonly JsonSerializerOptions s_optionsAllowFloatConstants = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.AllowNamedFloatingPointLiterals }; private static readonly JsonSerializerOptions s_optionReadFromStrAllowFloatConstants = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.AllowNamedFloatingPointLiterals }; private static readonly JsonSerializerOptions s_optionWriteAsStrAllowFloatConstants = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.WriteAsString | JsonNumberHandling.AllowNamedFloatingPointLiterals }; [Fact] public static void Number_AsRootType_RoundTrip() { RunAsRootTypeTest(JsonNumberTestData.Bytes); RunAsRootTypeTest(JsonNumberTestData.SBytes); RunAsRootTypeTest(JsonNumberTestData.Shorts); RunAsRootTypeTest(JsonNumberTestData.Ints); RunAsRootTypeTest(JsonNumberTestData.Longs); RunAsRootTypeTest(JsonNumberTestData.UShorts); RunAsRootTypeTest(JsonNumberTestData.UInts); RunAsRootTypeTest(JsonNumberTestData.ULongs); RunAsRootTypeTest(JsonNumberTestData.Floats); RunAsRootTypeTest(JsonNumberTestData.Doubles); RunAsRootTypeTest(JsonNumberTestData.Decimals); RunAsRootTypeTest(JsonNumberTestData.NullableBytes); RunAsRootTypeTest(JsonNumberTestData.NullableSBytes); RunAsRootTypeTest(JsonNumberTestData.NullableShorts); RunAsRootTypeTest(JsonNumberTestData.NullableInts); RunAsRootTypeTest(JsonNumberTestData.NullableLongs); RunAsRootTypeTest(JsonNumberTestData.NullableUShorts); RunAsRootTypeTest(JsonNumberTestData.NullableUInts); RunAsRootTypeTest(JsonNumberTestData.NullableULongs); RunAsRootTypeTest(JsonNumberTestData.NullableFloats); RunAsRootTypeTest(JsonNumberTestData.NullableDoubles); RunAsRootTypeTest(JsonNumberTestData.NullableDecimals); } private static void RunAsRootTypeTest<T>(List<T> numbers) { foreach (T number in numbers) { string numberAsString = GetNumberAsString(number); string json = $"{numberAsString}"; string jsonWithNumberAsString = @$"""{numberAsString}"""; PerformAsRootTypeSerialization(number, json, jsonWithNumberAsString); } } private static string GetNumberAsString<T>(T number) { return number switch { double @double => @double.ToString(JsonTestHelper.DoubleFormatString, CultureInfo.InvariantCulture), float @float => @float.ToString(JsonTestHelper.SingleFormatString, CultureInfo.InvariantCulture), decimal @decimal => @decimal.ToString(CultureInfo.InvariantCulture), _ => number.ToString() }; } 
private static void PerformAsRootTypeSerialization<T>(T number, string jsonWithNumberAsNumber, string jsonWithNumberAsString) { // Option: read from string // Deserialize Assert.Equal(number, JsonSerializer.Deserialize<T>(jsonWithNumberAsNumber, s_optionReadFromStr)); Assert.Equal(number, JsonSerializer.Deserialize<T>(jsonWithNumberAsString, s_optionReadFromStr)); // Serialize Assert.Equal(jsonWithNumberAsNumber, JsonSerializer.Serialize(number, s_optionReadFromStr)); // Option: write as string // Deserialize Assert.Equal(number, JsonSerializer.Deserialize<T>(jsonWithNumberAsNumber, s_optionWriteAsStr)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<T>(jsonWithNumberAsString, s_optionWriteAsStr)); // Serialize Assert.Equal(jsonWithNumberAsString, JsonSerializer.Serialize(number, s_optionWriteAsStr)); // Option: read and write from/to string // Deserialize Assert.Equal(number, JsonSerializer.Deserialize<T>(jsonWithNumberAsNumber, s_optionReadAndWriteFromStr)); Assert.Equal(number, JsonSerializer.Deserialize<T>(jsonWithNumberAsString, s_optionReadAndWriteFromStr)); // Serialize Assert.Equal(jsonWithNumberAsString, JsonSerializer.Serialize(number, s_optionReadAndWriteFromStr)); } [Fact] public static void Number_AsBoxed_RootType() { string numberAsString = @"""2"""; int @int = 2; float @float = 2; int? nullableInt = 2; float? nullableFloat = 2; Assert.Equal(numberAsString, JsonSerializer.Serialize((object)@int, s_optionReadAndWriteFromStr)); Assert.Equal(numberAsString, JsonSerializer.Serialize((object)@float, s_optionReadAndWriteFromStr)); Assert.Equal(numberAsString, JsonSerializer.Serialize((object)nullableInt, s_optionReadAndWriteFromStr)); Assert.Equal(numberAsString, JsonSerializer.Serialize((object)nullableFloat, s_optionReadAndWriteFromStr)); Assert.Equal(2, (int)JsonSerializer.Deserialize(numberAsString, typeof(int), s_optionReadAndWriteFromStr)); Assert.Equal(2, (float)JsonSerializer.Deserialize(numberAsString, typeof(float), s_optionReadAndWriteFromStr)); Assert.Equal(2, (int?)JsonSerializer.Deserialize(numberAsString, typeof(int?), s_optionReadAndWriteFromStr)); Assert.Equal(2, (float?)JsonSerializer.Deserialize(numberAsString, typeof(float?), s_optionReadAndWriteFromStr)); } [Fact] public static void Number_AsBoxed_Property() { int @int = 1; float? nullableFloat = 2; string expected = @"{""MyInt"":""1"",""MyNullableFloat"":""2""}"; var obj = new Class_With_BoxedNumbers { MyInt = @int, MyNullableFloat = nullableFloat }; string serialized = JsonSerializer.Serialize(obj); JsonTestHelper.AssertJsonEqual(expected, serialized); obj = JsonSerializer.Deserialize<Class_With_BoxedNumbers>(serialized); JsonElement el = Assert.IsType<JsonElement>(obj.MyInt); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal("1", el.GetString()); el = Assert.IsType<JsonElement>(obj.MyNullableFloat); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal("2", el.GetString()); } public class Class_With_BoxedNumbers { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public object MyInt { get; set; } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public object MyNullableFloat { get; set; } } [Fact] public static void Number_AsBoxed_CollectionRootType_Element() { int @int = 1; float? 
nullableFloat = 2; string expected = @"[""1""]"; var obj = new List<object> { @int }; string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); Assert.Equal(expected, serialized); obj = JsonSerializer.Deserialize<List<object>>(serialized, s_optionReadAndWriteFromStr); JsonElement el = Assert.IsType<JsonElement>(obj[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal("1", el.GetString()); expected = @"[""2""]"; IList obj2 = new object[] { nullableFloat }; serialized = JsonSerializer.Serialize(obj2, s_optionReadAndWriteFromStr); Assert.Equal(expected, serialized); obj2 = JsonSerializer.Deserialize<IList>(serialized, s_optionReadAndWriteFromStr); el = Assert.IsType<JsonElement>(obj2[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal("2", el.GetString()); } [Fact] public static void Number_AsBoxed_CollectionProperty_Element() { int @int = 2; float? nullableFloat = 2; string expected = @"{""MyInts"":[""2""],""MyNullableFloats"":[""2""]}"; var obj = new Class_With_ListsOfBoxedNumbers { MyInts = new List<object> { @int }, MyNullableFloats = new object[] { nullableFloat } }; string serialized = JsonSerializer.Serialize(obj); JsonTestHelper.AssertJsonEqual(expected, serialized); obj = JsonSerializer.Deserialize<Class_With_ListsOfBoxedNumbers>(serialized); JsonElement el = Assert.IsType<JsonElement>(obj.MyInts[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal("2", el.GetString()); el = Assert.IsType<JsonElement>(obj.MyNullableFloats[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal("2", el.GetString()); } public class Class_With_ListsOfBoxedNumbers { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public List<object> MyInts { get; set; } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public IList MyNullableFloats { get; set; } } [Fact] public static void NonNumber_AsBoxed_Property() { DateTime dateTime = DateTime.Now; Guid? nullableGuid = Guid.NewGuid(); string expected = @$"{{""MyDateTime"":{JsonSerializer.Serialize(dateTime)},""MyNullableGuid"":{JsonSerializer.Serialize(nullableGuid)}}}"; var obj = new Class_With_BoxedNonNumbers { MyDateTime = dateTime, MyNullableGuid = nullableGuid }; string serialized = JsonSerializer.Serialize(obj); JsonTestHelper.AssertJsonEqual(expected, serialized); obj = JsonSerializer.Deserialize<Class_With_BoxedNonNumbers>(serialized); JsonElement el = Assert.IsType<JsonElement>(obj.MyDateTime); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal(dateTime, el.GetDateTime()); el = Assert.IsType<JsonElement>(obj.MyNullableGuid); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal(nullableGuid.Value, el.GetGuid()); } public class Class_With_BoxedNonNumbers { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public object MyDateTime { get; set; } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public object MyNullableGuid { get; set; } } [Fact] public static void NonNumber_AsBoxed_CollectionRootType_Element() { DateTime dateTime = DateTime.Now; Guid? 
nullableGuid = Guid.NewGuid(); string expected = @$"[{JsonSerializer.Serialize(dateTime)}]"; var obj = new List<object> { dateTime }; string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); Assert.Equal(expected, serialized); obj = JsonSerializer.Deserialize<List<object>>(serialized, s_optionReadAndWriteFromStr); JsonElement el = Assert.IsType<JsonElement>(obj[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal(dateTime, el.GetDateTime()); expected = @$"[{JsonSerializer.Serialize(nullableGuid)}]"; IList obj2 = new object[] { nullableGuid }; serialized = JsonSerializer.Serialize(obj2, s_optionReadAndWriteFromStr); Assert.Equal(expected, serialized); obj2 = JsonSerializer.Deserialize<IList>(serialized, s_optionReadAndWriteFromStr); el = Assert.IsType<JsonElement>(obj2[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal(nullableGuid.Value, el.GetGuid()); } [Fact] public static void NonNumber_AsBoxed_CollectionProperty_Element() { DateTime dateTime = DateTime.Now; Guid? nullableGuid = Guid.NewGuid(); string expected = @$"{{""MyDateTimes"":[{JsonSerializer.Serialize(dateTime)}],""MyNullableGuids"":[{JsonSerializer.Serialize(nullableGuid)}]}}"; var obj = new Class_With_ListsOfBoxedNonNumbers { MyDateTimes = new List<object> { dateTime }, MyNullableGuids = new object[] { nullableGuid } }; string serialized = JsonSerializer.Serialize(obj); JsonTestHelper.AssertJsonEqual(expected, serialized); obj = JsonSerializer.Deserialize<Class_With_ListsOfBoxedNonNumbers>(serialized); JsonElement el = Assert.IsType<JsonElement>(obj.MyDateTimes[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal(dateTime, el.GetDateTime()); el = Assert.IsType<JsonElement>(obj.MyNullableGuids[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal(nullableGuid, el.GetGuid()); } public class Class_With_ListsOfBoxedNonNumbers { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public List<object> MyDateTimes { get; set; } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public IList MyNullableGuids { get; set; } } [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/49936", TestPlatforms.Android)] public static void Number_AsCollectionElement_RoundTrip() { RunAsCollectionElementTest(JsonNumberTestData.Bytes); RunAsCollectionElementTest(JsonNumberTestData.SBytes); RunAsCollectionElementTest(JsonNumberTestData.Shorts); RunAsCollectionElementTest(JsonNumberTestData.Ints); RunAsCollectionElementTest(JsonNumberTestData.Longs); RunAsCollectionElementTest(JsonNumberTestData.UShorts); RunAsCollectionElementTest(JsonNumberTestData.UInts); RunAsCollectionElementTest(JsonNumberTestData.ULongs); RunAsCollectionElementTest(JsonNumberTestData.Floats); RunAsCollectionElementTest(JsonNumberTestData.Doubles); RunAsCollectionElementTest(JsonNumberTestData.Decimals); RunAsCollectionElementTest(JsonNumberTestData.NullableBytes); RunAsCollectionElementTest(JsonNumberTestData.NullableSBytes); RunAsCollectionElementTest(JsonNumberTestData.NullableShorts); RunAsCollectionElementTest(JsonNumberTestData.NullableInts); RunAsCollectionElementTest(JsonNumberTestData.NullableLongs); RunAsCollectionElementTest(JsonNumberTestData.NullableUShorts); RunAsCollectionElementTest(JsonNumberTestData.NullableUInts); RunAsCollectionElementTest(JsonNumberTestData.NullableULongs); RunAsCollectionElementTest(JsonNumberTestData.NullableFloats); 
RunAsCollectionElementTest(JsonNumberTestData.NullableDoubles); RunAsCollectionElementTest(JsonNumberTestData.NullableDecimals); } private static void RunAsCollectionElementTest<T>(List<T> numbers) { StringBuilder jsonBuilder_NumbersAsNumbers = new StringBuilder(); StringBuilder jsonBuilder_NumbersAsStrings = new StringBuilder(); StringBuilder jsonBuilder_NumbersAsNumbersAndStrings = new StringBuilder(); StringBuilder jsonBuilder_NumbersAsNumbersAndStrings_Alternate = new StringBuilder(); bool asNumber = false; jsonBuilder_NumbersAsNumbers.Append("["); jsonBuilder_NumbersAsStrings.Append("["); jsonBuilder_NumbersAsNumbersAndStrings.Append("["); jsonBuilder_NumbersAsNumbersAndStrings_Alternate.Append("["); foreach (T number in numbers) { string numberAsString = GetNumberAsString(number); string jsonWithNumberAsString = @$"""{numberAsString}"""; jsonBuilder_NumbersAsNumbers.Append($"{numberAsString},"); jsonBuilder_NumbersAsStrings.Append($"{jsonWithNumberAsString},"); jsonBuilder_NumbersAsNumbersAndStrings.Append(asNumber ? $"{numberAsString}," : $"{jsonWithNumberAsString},"); jsonBuilder_NumbersAsNumbersAndStrings_Alternate.Append(!asNumber ? $"{numberAsString}," : $"{jsonWithNumberAsString},"); asNumber = !asNumber; } jsonBuilder_NumbersAsNumbers.Remove(jsonBuilder_NumbersAsNumbers.Length - 1, 1); jsonBuilder_NumbersAsStrings.Remove(jsonBuilder_NumbersAsStrings.Length - 1, 1); jsonBuilder_NumbersAsNumbersAndStrings.Remove(jsonBuilder_NumbersAsNumbersAndStrings.Length - 1, 1); jsonBuilder_NumbersAsNumbersAndStrings_Alternate.Remove(jsonBuilder_NumbersAsNumbersAndStrings_Alternate.Length - 1, 1); jsonBuilder_NumbersAsNumbers.Append("]"); jsonBuilder_NumbersAsStrings.Append("]"); jsonBuilder_NumbersAsNumbersAndStrings.Append("]"); jsonBuilder_NumbersAsNumbersAndStrings_Alternate.Append("]"); string jsonNumbersAsStrings = jsonBuilder_NumbersAsStrings.ToString(); PerformAsCollectionElementSerialization( numbers, jsonBuilder_NumbersAsNumbers.ToString(), jsonNumbersAsStrings, jsonBuilder_NumbersAsNumbersAndStrings.ToString(), jsonBuilder_NumbersAsNumbersAndStrings_Alternate.ToString()); // Reflection based tests for every collection type. 
RunAllCollectionsRoundTripTest<T>(jsonNumbersAsStrings); } private static void PerformAsCollectionElementSerialization<T>( List<T> numbers, string json_NumbersAsNumbers, string json_NumbersAsStrings, string json_NumbersAsNumbersAndStrings, string json_NumbersAsNumbersAndStrings_Alternate) { List<T> deserialized; // Option: read from string // Deserialize deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbers, s_optionReadFromStr); AssertIEnumerableEqual(numbers, deserialized); deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsStrings, s_optionReadFromStr); AssertIEnumerableEqual(numbers, deserialized); deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbersAndStrings, s_optionReadFromStr); AssertIEnumerableEqual(numbers, deserialized); deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbersAndStrings_Alternate, s_optionReadFromStr); AssertIEnumerableEqual(numbers, deserialized); // Serialize Assert.Equal(json_NumbersAsNumbers, JsonSerializer.Serialize(numbers, s_optionReadFromStr)); // Option: write as string // Deserialize deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbers, s_optionWriteAsStr); AssertIEnumerableEqual(numbers, deserialized); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<List<T>>(json_NumbersAsStrings, s_optionWriteAsStr)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbersAndStrings, s_optionWriteAsStr)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbersAndStrings_Alternate, s_optionWriteAsStr)); // Serialize Assert.Equal(json_NumbersAsStrings, JsonSerializer.Serialize(numbers, s_optionWriteAsStr)); // Option: read and write from/to string // Deserialize deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbers, s_optionReadAndWriteFromStr); AssertIEnumerableEqual(numbers, deserialized); deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsStrings, s_optionReadAndWriteFromStr); AssertIEnumerableEqual(numbers, deserialized); deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbersAndStrings, s_optionReadAndWriteFromStr); AssertIEnumerableEqual(numbers, deserialized); deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbersAndStrings_Alternate, s_optionReadAndWriteFromStr); AssertIEnumerableEqual(numbers, deserialized); // Serialize Assert.Equal(json_NumbersAsStrings, JsonSerializer.Serialize(numbers, s_optionReadAndWriteFromStr)); } private static void AssertIEnumerableEqual<T>(IEnumerable<T> list1, IEnumerable<T> list2) { IEnumerator<T> enumerator1 = list1.GetEnumerator(); IEnumerator<T> enumerator2 = list2.GetEnumerator(); while (enumerator1.MoveNext()) { enumerator2.MoveNext(); Assert.Equal(enumerator1.Current, enumerator2.Current); } Assert.False(enumerator2.MoveNext()); } private static void RunAllCollectionsRoundTripTest<T>(string json) { foreach (Type type in CollectionTestTypes.DeserializableGenericEnumerableTypes<T>()) { if (type.IsGenericType && type.GetGenericTypeDefinition() == typeof(HashSet<>)) { HashSet<T> obj1 = (HashSet<T>)JsonSerializer.Deserialize(json, type, s_optionReadAndWriteFromStr); string serialized = JsonSerializer.Serialize(obj1, s_optionReadAndWriteFromStr); HashSet<T> obj2 = (HashSet<T>)JsonSerializer.Deserialize(serialized, type, s_optionReadAndWriteFromStr); Assert.Equal(obj1.Count, obj2.Count); foreach (T element in obj1) { Assert.True(obj2.Contains(element)); } } else if (type != typeof(byte[])) 
{ object obj = JsonSerializer.Deserialize(json, type, s_optionReadAndWriteFromStr); string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); Assert.Equal(json, serialized); } } foreach (Type type in CollectionTestTypes.DeserializableNonGenericEnumerableTypes()) { // Deserialized as collection of JsonElements. object obj = JsonSerializer.Deserialize(json, type, s_optionReadAndWriteFromStr); // Serialized as strings with escaping. string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); // Ensure escaped values were serialized accurately List<T> list = JsonSerializer.Deserialize<List<T>>(serialized, s_optionReadAndWriteFromStr); serialized = JsonSerializer.Serialize(list, s_optionReadAndWriteFromStr); Assert.Equal(json, serialized); // Serialize instance which is a collection of numbers (not JsonElements). obj = Activator.CreateInstance(type, new[] { list }); serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); Assert.Equal(json, serialized); } } [Fact] public static void Number_AsDictionaryElement_RoundTrip() { var dict = new Dictionary<int, float>(); for (int i = 0; i < 10; i++) { dict[JsonNumberTestData.Ints[i]] = JsonNumberTestData.Floats[i]; } // Serialize string serialized = JsonSerializer.Serialize(dict, s_optionReadAndWriteFromStr); AssertDictionaryElements_StringValues(serialized); // Deserialize dict = JsonSerializer.Deserialize<Dictionary<int, float>>(serialized, s_optionReadAndWriteFromStr); // Test roundtrip JsonTestHelper.AssertJsonEqual(serialized, JsonSerializer.Serialize(dict, s_optionReadAndWriteFromStr)); } private static void AssertDictionaryElements_StringValues(string serialized) { var reader = new Utf8JsonReader(Encoding.UTF8.GetBytes(serialized)); reader.Read(); while (reader.Read()) { if (reader.TokenType == JsonTokenType.EndObject) { break; } else if (reader.TokenType == JsonTokenType.String) { Assert.True(reader.ValueSpan.IndexOf((byte)'\\') == -1); } else { Assert.Equal(JsonTokenType.PropertyName, reader.TokenType); } } } [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/39674", typeof(PlatformDetection), nameof(PlatformDetection.IsMonoInterpreter))] [SkipOnCoreClr("https://github.com/dotnet/runtime/issues/45464", ~RuntimeConfiguration.Release)] public static void DictionariesRoundTrip() { RunAllDictionariessRoundTripTest(JsonNumberTestData.ULongs); RunAllDictionariessRoundTripTest(JsonNumberTestData.Floats); RunAllDictionariessRoundTripTest(JsonNumberTestData.Doubles); } private static void RunAllDictionariessRoundTripTest<T>(List<T> numbers) { StringBuilder jsonBuilder_NumbersAsStrings = new StringBuilder(); jsonBuilder_NumbersAsStrings.Append("{"); foreach (T number in numbers) { string numberAsString = GetNumberAsString(number); string jsonWithNumberAsString = @$"""{numberAsString}"""; jsonBuilder_NumbersAsStrings.Append($"{jsonWithNumberAsString}:"); jsonBuilder_NumbersAsStrings.Append($"{jsonWithNumberAsString},"); } jsonBuilder_NumbersAsStrings.Remove(jsonBuilder_NumbersAsStrings.Length - 1, 1); jsonBuilder_NumbersAsStrings.Append("}"); string jsonNumbersAsStrings = jsonBuilder_NumbersAsStrings.ToString(); foreach (Type type in CollectionTestTypes.DeserializableDictionaryTypes<string, T>()) { object obj = JsonSerializer.Deserialize(jsonNumbersAsStrings, type, s_optionReadAndWriteFromStr); JsonTestHelper.AssertJsonEqual(jsonNumbersAsStrings, JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr)); } foreach (Type type in 
CollectionTestTypes.DeserializableNonGenericDictionaryTypes()) { Dictionary<T, T> dict = JsonSerializer.Deserialize<Dictionary<T, T>>(jsonNumbersAsStrings, s_optionReadAndWriteFromStr); // Serialize instance which is a dictionary of numbers (not JsonElements). object obj = Activator.CreateInstance(type, new[] { dict }); string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); JsonTestHelper.AssertJsonEqual(jsonNumbersAsStrings, serialized); } } [Fact] public static void Number_AsPropertyValue_RoundTrip() { var obj = new Class_With_NullableUInt64_And_Float() { NullableUInt64Number = JsonNumberTestData.NullableULongs.LastOrDefault(), FloatNumbers = JsonNumberTestData.Floats }; // Serialize string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); // Deserialize obj = JsonSerializer.Deserialize<Class_With_NullableUInt64_And_Float>(serialized, s_optionReadAndWriteFromStr); // Test roundtrip JsonTestHelper.AssertJsonEqual(serialized, JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr)); } private class Class_With_NullableUInt64_And_Float { public ulong? NullableUInt64Number { get; set; } [JsonInclude] public List<float> FloatNumbers; } [Fact] public static void Number_AsKeyValuePairValue_RoundTrip() { var obj = new KeyValuePair<ulong?, List<float>>(JsonNumberTestData.NullableULongs.LastOrDefault(), JsonNumberTestData.Floats); // Serialize string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); // Deserialize obj = JsonSerializer.Deserialize<KeyValuePair<ulong?, List<float>>>(serialized, s_optionReadAndWriteFromStr); // Test roundtrip JsonTestHelper.AssertJsonEqual(serialized, JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr)); } [Fact] public static void Number_AsObjectWithParameterizedCtor_RoundTrip() { var obj = new MyClassWithNumbers(JsonNumberTestData.NullableULongs.LastOrDefault(), JsonNumberTestData.Floats); // Serialize string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); // Deserialize obj = JsonSerializer.Deserialize<MyClassWithNumbers>(serialized, s_optionReadAndWriteFromStr); // Test roundtrip JsonTestHelper.AssertJsonEqual(serialized, JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr)); } private class MyClassWithNumbers { public ulong? Ulong { get; } public List<float> ListOfFloats { get; } public MyClassWithNumbers(ulong? @ulong, List<float> listOfFloats) { Ulong = @ulong; ListOfFloats = listOfFloats; } } [Fact] public static void Number_AsObjectWithParameterizedCtor_PropHasAttribute() { string json = @"{""ListOfFloats"":[""1""]}"; // Strict handling on property overrides loose global policy. Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<MyClassWithNumbers_PropsHasAttribute>(json, s_optionReadFromStr)); // Serialize json = @"{""ListOfFloats"":[1]}"; MyClassWithNumbers_PropsHasAttribute obj = JsonSerializer.Deserialize<MyClassWithNumbers_PropsHasAttribute>(json); // Number serialized as JSON number due to strict handling on property which overrides loose global policy. 
Assert.Equal(json, JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr)); } private class MyClassWithNumbers_PropsHasAttribute { [JsonNumberHandling(JsonNumberHandling.Strict)] public List<float> ListOfFloats { get; } public MyClassWithNumbers_PropsHasAttribute(List<float> listOfFloats) { ListOfFloats = listOfFloats; } } [Fact] public static void FloatingPointConstants_Pass() { // Valid values PerformFloatingPointSerialization("NaN"); PerformFloatingPointSerialization("Infinity"); PerformFloatingPointSerialization("-Infinity"); PerformFloatingPointSerialization("\u004EaN"); // NaN PerformFloatingPointSerialization("Inf\u0069ni\u0074y"); // Infinity PerformFloatingPointSerialization("\u002DInf\u0069nity"); // -Infinity static void PerformFloatingPointSerialization(string testString) { string testStringAsJson = $@"""{testString}"""; string testJson = @$"{{""FloatNumber"":{testStringAsJson},""DoubleNumber"":{testStringAsJson}}}"; StructWithNumbers obj; switch (testString) { case "NaN": obj = JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionsAllowFloatConstants); Assert.Equal(float.NaN, obj.FloatNumber); Assert.Equal(double.NaN, obj.DoubleNumber); obj = JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionReadFromStr); Assert.Equal(float.NaN, obj.FloatNumber); Assert.Equal(double.NaN, obj.DoubleNumber); break; case "Infinity": obj = JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionsAllowFloatConstants); Assert.Equal(float.PositiveInfinity, obj.FloatNumber); Assert.Equal(double.PositiveInfinity, obj.DoubleNumber); obj = JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionReadFromStr); Assert.Equal(float.PositiveInfinity, obj.FloatNumber); Assert.Equal(double.PositiveInfinity, obj.DoubleNumber); break; case "-Infinity": obj = JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionsAllowFloatConstants); Assert.Equal(float.NegativeInfinity, obj.FloatNumber); Assert.Equal(double.NegativeInfinity, obj.DoubleNumber); obj = JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionReadFromStr); Assert.Equal(float.NegativeInfinity, obj.FloatNumber); Assert.Equal(double.NegativeInfinity, obj.DoubleNumber); break; default: Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionsAllowFloatConstants)); return; } JsonTestHelper.AssertJsonEqual(testJson, JsonSerializer.Serialize(obj, s_optionsAllowFloatConstants)); JsonTestHelper.AssertJsonEqual(testJson, JsonSerializer.Serialize(obj, s_optionWriteAsStr)); } } [Theory] [InlineData("naN")] [InlineData("Nan")] [InlineData("NAN")] [InlineData("+Infinity")] [InlineData("+infinity")] [InlineData("infinity")] [InlineData("infinitY")] [InlineData("INFINITY")] [InlineData("+INFINITY")] [InlineData("-infinity")] [InlineData("-infinitY")] [InlineData("-INFINITY")] [InlineData(" NaN")] [InlineData("NaN ")] [InlineData(" Infinity")] [InlineData(" -Infinity")] [InlineData("Infinity ")] [InlineData("-Infinity ")] [InlineData("a-Infinity")] [InlineData("NaNa")] [InlineData("Infinitya")] [InlineData("-Infinitya")] #pragma warning disable xUnit1025 // Theory method 'FloatingPointConstants_Fail' on test class 'NumberHandlingTests' has InlineData duplicate(s) [InlineData("\u006EaN")] // "naN" [InlineData("\u0020Inf\u0069ni\u0074y")] // " Infinity" [InlineData("\u002BInf\u0069nity")] // "+Infinity" #pragma warning restore xUnit1025 public static void FloatingPointConstants_Fail(string testString) { string testStringAsJson = $@"""{testString}"""; string testJson 
= @$"{{""FloatNumber"":{testStringAsJson}}}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionReadFromStr)); testJson = @$"{{""DoubleNumber"":{testStringAsJson}}}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionReadFromStr)); } [Fact] public static void AllowFloatingPointConstants_WriteAsNumber_IfNotConstant() { float @float = 1; // Not written as "1" Assert.Equal("1", JsonSerializer.Serialize(@float, s_optionsAllowFloatConstants)); double @double = 1; // Not written as "1" Assert.Equal("1", JsonSerializer.Serialize(@double, s_optionsAllowFloatConstants)); } [Theory] [InlineData("NaN")] [InlineData("Infinity")] [InlineData("-Infinity")] public static void Unquoted_FloatingPointConstants_Read_Fail(string testString) { Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<float>(testString, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<double?>(testString, s_optionReadFromStr)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<double>(testString, s_optionReadFromStrAllowFloatConstants)); } private struct StructWithNumbers { public float FloatNumber { get; set; } public double DoubleNumber { get; set; } } [Fact] public static void ReadFromString_AllowFloatingPoint() { string json = @"{""IntNumber"":""1"",""FloatNumber"":""NaN""}"; ClassWithNumbers obj = JsonSerializer.Deserialize<ClassWithNumbers>(json, s_optionReadFromStrAllowFloatConstants); Assert.Equal(1, obj.IntNumber); Assert.Equal(float.NaN, obj.FloatNumber); JsonTestHelper.AssertJsonEqual(@"{""IntNumber"":1,""FloatNumber"":""NaN""}", JsonSerializer.Serialize(obj, s_optionReadFromStrAllowFloatConstants)); } [Fact] public static void WriteAsString_AllowFloatingPoint() { string json = @"{""IntNumber"":""1"",""FloatNumber"":""NaN""}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWithNumbers>(json, s_optionWriteAsStrAllowFloatConstants)); var obj = new ClassWithNumbers { IntNumber = 1, FloatNumber = float.NaN }; JsonTestHelper.AssertJsonEqual(json, JsonSerializer.Serialize(obj, s_optionWriteAsStrAllowFloatConstants)); } public class ClassWithNumbers { public int IntNumber { get; set; } public float FloatNumber { get; set; } } [Fact] public static void FloatingPointConstants_IncompatibleNumber() { AssertFloatingPointIncompatible_Fails<byte>(); AssertFloatingPointIncompatible_Fails<sbyte>(); AssertFloatingPointIncompatible_Fails<short>(); AssertFloatingPointIncompatible_Fails<int>(); AssertFloatingPointIncompatible_Fails<long>(); AssertFloatingPointIncompatible_Fails<ushort>(); AssertFloatingPointIncompatible_Fails<uint>(); AssertFloatingPointIncompatible_Fails<ulong>(); AssertFloatingPointIncompatible_Fails<decimal>(); AssertFloatingPointIncompatible_Fails<byte?>(); AssertFloatingPointIncompatible_Fails<sbyte?>(); AssertFloatingPointIncompatible_Fails<short?>(); AssertFloatingPointIncompatible_Fails<int?>(); AssertFloatingPointIncompatible_Fails<long?>(); AssertFloatingPointIncompatible_Fails<ushort?>(); AssertFloatingPointIncompatible_Fails<uint?>(); AssertFloatingPointIncompatible_Fails<ulong?>(); AssertFloatingPointIncompatible_Fails<decimal?>(); } private static void 
AssertFloatingPointIncompatible_Fails<T>() { string[] testCases = new[] { @"""NaN""", @"""Infinity""", @"""-Infinity""", }; foreach (string test in testCases) { Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<T>(test, s_optionReadFromStrAllowFloatConstants)); } } [Fact] public static void UnsupportedFormats() { AssertUnsupportedFormatThrows<byte>(); AssertUnsupportedFormatThrows<sbyte>(); AssertUnsupportedFormatThrows<short>(); AssertUnsupportedFormatThrows<int>(); AssertUnsupportedFormatThrows<long>(); AssertUnsupportedFormatThrows<ushort>(); AssertUnsupportedFormatThrows<uint>(); AssertUnsupportedFormatThrows<ulong>(); AssertUnsupportedFormatThrows<float>(); AssertUnsupportedFormatThrows<decimal>(); AssertUnsupportedFormatThrows<byte?>(); AssertUnsupportedFormatThrows<sbyte?>(); AssertUnsupportedFormatThrows<short?>(); AssertUnsupportedFormatThrows<int?>(); AssertUnsupportedFormatThrows<long?>(); AssertUnsupportedFormatThrows<ushort?>(); AssertUnsupportedFormatThrows<uint?>(); AssertUnsupportedFormatThrows<ulong?>(); AssertUnsupportedFormatThrows<float?>(); AssertUnsupportedFormatThrows<decimal?>(); } private static void AssertUnsupportedFormatThrows<T>() { string[] testCases = new[] { "$123.46", // Currency "100.00 %", // Percent "1234,57", // Fixed point "00FF", // Hexadecimal }; foreach (string test in testCases) { Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<T>(test, s_optionReadFromStr)); } } [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/49936", TestPlatforms.Android)] public static void EscapingTest() { // Cause all characters to be escaped. var encoderSettings = new TextEncoderSettings(); encoderSettings.ForbidCharacters('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', '+', '-', 'e', 'E'); JavaScriptEncoder encoder = JavaScriptEncoder.Create(encoderSettings); var options = new JsonSerializerOptions(s_optionReadAndWriteFromStr) { Encoder = encoder }; PerformEscapingTest(JsonNumberTestData.Bytes, options); PerformEscapingTest(JsonNumberTestData.SBytes, options); PerformEscapingTest(JsonNumberTestData.Shorts, options); PerformEscapingTest(JsonNumberTestData.Ints, options); PerformEscapingTest(JsonNumberTestData.Longs, options); PerformEscapingTest(JsonNumberTestData.UShorts, options); PerformEscapingTest(JsonNumberTestData.UInts, options); PerformEscapingTest(JsonNumberTestData.ULongs, options); PerformEscapingTest(JsonNumberTestData.Floats, options); PerformEscapingTest(JsonNumberTestData.Doubles, options); PerformEscapingTest(JsonNumberTestData.Decimals, options); } private static void PerformEscapingTest<T>(List<T> numbers, JsonSerializerOptions options) { // All input characters are escaped IEnumerable<string> numbersAsStrings = numbers.Select(num => GetNumberAsString(num)); string input = JsonSerializer.Serialize(numbersAsStrings, options); AssertListNumbersEscaped(input); // Unescaping works List<T> deserialized = JsonSerializer.Deserialize<List<T>>(input, options); Assert.Equal(numbers.Count, deserialized.Count); for (int i = 0; i < numbers.Count; i++) { Assert.Equal(numbers[i], deserialized[i]); } // Every number is written as a string, and custom escaping is not honored. 
string serialized = JsonSerializer.Serialize(deserialized, options); AssertListNumbersUnescaped(serialized); } private static void AssertListNumbersEscaped(string json) { var reader = new Utf8JsonReader(Encoding.UTF8.GetBytes(json)); reader.Read(); while (reader.Read()) { if (reader.TokenType == JsonTokenType.EndArray) { break; } else { Assert.Equal(JsonTokenType.String, reader.TokenType); Assert.True(reader.ValueSpan.IndexOf((byte)'\\') != -1); } } } private static void AssertListNumbersUnescaped(string json) { var reader = new Utf8JsonReader(Encoding.UTF8.GetBytes(json)); reader.Read(); while (reader.Read()) { if (reader.TokenType == JsonTokenType.EndArray) { break; } else { Assert.Equal(JsonTokenType.String, reader.TokenType); Assert.True(reader.ValueSpan.IndexOf((byte)'\\') == -1); } } } [Fact] public static void Number_RoundtripNull() { Perform_Number_RoundTripNull_Test<byte>(); Perform_Number_RoundTripNull_Test<sbyte>(); Perform_Number_RoundTripNull_Test<short>(); Perform_Number_RoundTripNull_Test<int>(); Perform_Number_RoundTripNull_Test<long>(); Perform_Number_RoundTripNull_Test<ushort>(); Perform_Number_RoundTripNull_Test<uint>(); Perform_Number_RoundTripNull_Test<ulong>(); Perform_Number_RoundTripNull_Test<float>(); Perform_Number_RoundTripNull_Test<decimal>(); } private static void Perform_Number_RoundTripNull_Test<T>() { string nullAsJson = "null"; string nullAsQuotedJson = $@"""{nullAsJson}"""; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<T>(nullAsJson, s_optionReadAndWriteFromStr)); Assert.Equal("0", JsonSerializer.Serialize(default(T))); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<T>(nullAsQuotedJson, s_optionReadAndWriteFromStr)); } [Fact] public static void NullableNumber_RoundtripNull() { Perform_NullableNumber_RoundTripNull_Test<byte?>(); Perform_NullableNumber_RoundTripNull_Test<sbyte?>(); Perform_NullableNumber_RoundTripNull_Test<short?>(); Perform_NullableNumber_RoundTripNull_Test<int?>(); Perform_NullableNumber_RoundTripNull_Test<long?>(); Perform_NullableNumber_RoundTripNull_Test<ushort?>(); Perform_NullableNumber_RoundTripNull_Test<uint?>(); Perform_NullableNumber_RoundTripNull_Test<ulong?>(); Perform_NullableNumber_RoundTripNull_Test<float?>(); Perform_NullableNumber_RoundTripNull_Test<decimal?>(); } private static void Perform_NullableNumber_RoundTripNull_Test<T>() { string nullAsJson = "null"; string nullAsQuotedJson = $@"""{nullAsJson}"""; Assert.Null(JsonSerializer.Deserialize<T>(nullAsJson, s_optionReadAndWriteFromStr)); Assert.Equal(nullAsJson, JsonSerializer.Serialize(default(T))); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<T>(nullAsQuotedJson, s_optionReadAndWriteFromStr)); } [Fact] public static void Disallow_ArbritaryStrings_On_AllowFloatingPointConstants() { string json = @"""12345"""; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<byte>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<sbyte>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<short>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<int>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<long>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ushort>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<uint>(json, 
s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ulong>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<float>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<double>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<decimal>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<byte?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<sbyte?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<short?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<int?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<long?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ushort?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<uint?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ulong?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<float?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<double?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<decimal?>(json, s_optionsAllowFloatConstants)); } [Fact] public static void Attributes_OnMembers_Work() { // Bad JSON because Int should not be string. string intIsString = @"{""Float"":""1234.5"",""Int"":""12345""}"; // Good JSON because Float can be string. string floatIsString = @"{""Float"":""1234.5"",""Int"":12345}"; // Good JSON because Float can be number. string floatIsNumber = @"{""Float"":1234.5,""Int"":12345}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWith_Attribute_OnNumber>(intIsString)); ClassWith_Attribute_OnNumber obj = JsonSerializer.Deserialize<ClassWith_Attribute_OnNumber>(floatIsString); Assert.Equal(1234.5, obj.Float); Assert.Equal(12345, obj.Int); obj = JsonSerializer.Deserialize<ClassWith_Attribute_OnNumber>(floatIsNumber); Assert.Equal(1234.5, obj.Float); Assert.Equal(12345, obj.Int); // Per options, float should be written as string. 
JsonTestHelper.AssertJsonEqual(floatIsString, JsonSerializer.Serialize(obj)); } private class ClassWith_Attribute_OnNumber { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public float Float { get; set; } public int Int { get; set; } } [Fact] public static void Attribute_OnRootType_Works() { // Not allowed string floatIsString = @"{""Float"":""1234"",""Int"":123}"; // Allowed string floatIsNan = @"{""Float"":""NaN"",""Int"":123}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<Type_AllowFloatConstants>(floatIsString)); Type_AllowFloatConstants obj = JsonSerializer.Deserialize<Type_AllowFloatConstants>(floatIsNan); Assert.Equal(float.NaN, obj.Float); Assert.Equal(123, obj.Int); JsonTestHelper.AssertJsonEqual(floatIsNan, JsonSerializer.Serialize(obj)); } [JsonNumberHandling(JsonNumberHandling.AllowNamedFloatingPointLiterals)] private class Type_AllowFloatConstants { public float Float { get; set; } public int Int { get; set; } } [Fact] public static void AttributeOnType_WinsOver_GlobalOption() { // Global options strict, type options loose string json = @"{""Float"":""12345""}"; var obj1 = JsonSerializer.Deserialize<ClassWith_LooseAttribute>(json); Assert.Equal(@"{""Float"":""12345""}", JsonSerializer.Serialize(obj1)); // Global options loose, type options strict json = @"{""Float"":""12345""}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWith_StrictAttribute>(json, s_optionReadAndWriteFromStr)); var obj2 = new ClassWith_StrictAttribute() { Float = 12345 }; Assert.Equal(@"{""Float"":12345}", JsonSerializer.Serialize(obj2, s_optionReadAndWriteFromStr)); } [JsonNumberHandling(JsonNumberHandling.Strict)] public class ClassWith_StrictAttribute { public float Float { get; set; } } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] private class ClassWith_LooseAttribute { public float Float { get; set; } } [Fact] public static void AttributeOnMember_WinsOver_AttributeOnType() { string json = @"{""Double"":""NaN""}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWith_Attribute_On_TypeAndMember>(json)); var obj = new ClassWith_Attribute_On_TypeAndMember { Double = float.NaN }; Assert.Throws<ArgumentException>(() => JsonSerializer.Serialize(obj)); } [JsonNumberHandling(JsonNumberHandling.AllowNamedFloatingPointLiterals)] private class ClassWith_Attribute_On_TypeAndMember { [JsonNumberHandling(JsonNumberHandling.Strict)] public double Double { get; set; } } [Fact] public static void Attribute_OnNestedType_Works() { string jsonWithShortProperty = @"{""Short"":""1""}"; ClassWith_ReadAsStringAttribute obj = JsonSerializer.Deserialize<ClassWith_ReadAsStringAttribute>(jsonWithShortProperty); Assert.Equal(1, obj.Short); string jsonWithMyObjectProperty = @"{""MyObject"":{""Float"":""1""}}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWith_ReadAsStringAttribute>(jsonWithMyObjectProperty)); } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString)] public class ClassWith_ReadAsStringAttribute { public short Short { get; set; } public ClassWith_StrictAttribute MyObject { get; set; } } [Fact] public static void MemberAttributeAppliesToCollection_SimpleElements() { RunTest<int[]>(); RunTest<ConcurrentQueue<int>>(); RunTest<GenericICollectionWrapper<int>>(); RunTest<IEnumerable<int>>(); RunTest<Collection<int>>(); RunTest<ImmutableList<int>>(); RunTest<HashSet<int>>(); RunTest<List<int>>(); RunTest<IList<int>>(); 
RunTest<IList>(); RunTest<Queue<int>>(); static void RunTest<T>() { string json = @"{""MyList"":[""1"",""2""]}"; ClassWithSimpleCollectionProperty<T> obj = global::System.Text.Json.JsonSerializer.Deserialize<ClassWithSimpleCollectionProperty<T>>(json); Assert.Equal(json, global::System.Text.Json.JsonSerializer.Serialize(obj)); } } public class ClassWithSimpleCollectionProperty<T> { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public T MyList { get; set; } } [Fact] public static void NestedCollectionElementTypeHandling_Overrides_GlobalOption() { // Strict policy on the collection element type overrides read-as-string on the collection property string json = @"{""MyList"":[{""Float"":""1""}]}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWithComplexListProperty>(json, s_optionReadAndWriteFromStr)); // Strict policy on the collection element type overrides write-as-string on the collection property var obj = new ClassWithComplexListProperty { MyList = new List<ClassWith_StrictAttribute> { new ClassWith_StrictAttribute { Float = 1 } } }; Assert.Equal(@"{""MyList"":[{""Float"":1}]}", JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr)); } public class ClassWithComplexListProperty { public List<ClassWith_StrictAttribute> MyList { get; set; } } [Fact] public static void NumberHandlingAttribute_NotAllowedOn_CollectionOfNonNumbers() { Assert.Throws<InvalidOperationException>(() => JsonSerializer.Deserialize<ClassWith_AttributeOnComplexListProperty>("")); Assert.Throws<InvalidOperationException>(() => JsonSerializer.Serialize(new ClassWith_AttributeOnComplexListProperty())); Assert.Throws<InvalidOperationException>(() => JsonSerializer.Deserialize<ClassWith_AttributeOnComplexDictionaryProperty>("")); Assert.Throws<InvalidOperationException>(() => JsonSerializer.Serialize(new ClassWith_AttributeOnComplexDictionaryProperty())); } public class ClassWith_AttributeOnComplexListProperty { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public List<ClassWith_StrictAttribute> MyList { get; set; } } public class ClassWith_AttributeOnComplexDictionaryProperty { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString)] public Dictionary<string, ClassWith_StrictAttribute> MyDictionary { get; set; } } [Fact] public static void MemberAttributeAppliesToDictionary_SimpleElements() { string json = @"{""First"":""1"",""Second"":""2""}"; ClassWithSimpleDictionaryProperty obj = JsonSerializer.Deserialize<ClassWithSimpleDictionaryProperty>(json); } public class ClassWithSimpleDictionaryProperty { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public Dictionary<string, int> MyDictionary { get; set; } } [Fact] public static void NestedDictionaryElementTypeHandling_Overrides_GlobalOption() { // Strict policy on the dictionary element type overrides read-as-string on the collection property. 
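            // ClassWith_StrictAttribute is annotated with JsonNumberHandling.Strict at the type level,
            // so its Float member must appear as a plain JSON number even when the options allow
            // reading numbers from strings.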
            string json = @"{""MyDictionary"":{""Key"":{""Float"":""1""}}}";
            Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWithComplexDictionaryProperty>(json, s_optionReadFromStr));

            // Strict policy on the collection element type overrides write-as-string on the collection property
            var obj = new ClassWithComplexDictionaryProperty
            {
                MyDictionary = new Dictionary<string, ClassWith_StrictAttribute> { ["Key"] = new ClassWith_StrictAttribute { Float = 1 } }
            };
            Assert.Equal(@"{""MyDictionary"":{""Key"":{""Float"":1}}}", JsonSerializer.Serialize(obj, s_optionReadFromStr));
        }

        public class ClassWithComplexDictionaryProperty
        {
            public Dictionary<string, ClassWith_StrictAttribute> MyDictionary { get; set; }
        }

        [Fact]
        public static void TypeAttributeAppliesTo_CustomCollectionElements()
        {
            string json = @"[""1""]";
            MyCustomList obj = JsonSerializer.Deserialize<MyCustomList>(json);
            Assert.Equal(json, JsonSerializer.Serialize(obj));
        }

        [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)]
        public class MyCustomList : List<int> { }

        [Fact]
        public static void TypeAttributeAppliesTo_CustomCollectionElements_HonoredWhenProperty()
        {
            string json = @"{""List"":[""1""]}";
            ClassWithCustomList obj = JsonSerializer.Deserialize<ClassWithCustomList>(json);
            Assert.Equal(json, JsonSerializer.Serialize(obj));
        }

        public class ClassWithCustomList
        {
            public MyCustomList List { get; set; }
        }

        [Fact]
        public static void TypeAttributeAppliesTo_CustomDictionaryElements()
        {
            string json = @"{""Key"":""1""}";
            MyCustomDictionary obj = JsonSerializer.Deserialize<MyCustomDictionary>(json);
            Assert.Equal(json, JsonSerializer.Serialize(obj));
        }

        [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)]
        public class MyCustomDictionary : Dictionary<string, int> { }

        [Fact]
        public static void TypeAttributeAppliesTo_CustomDictionaryElements_HonoredWhenProperty()
        {
            string json = @"{""Dictionary"":{""Key"":""1""}}";
            ClassWithCustomDictionary obj = JsonSerializer.Deserialize<ClassWithCustomDictionary>(json);
            Assert.Equal(json, JsonSerializer.Serialize(obj));
        }

        public class ClassWithCustomDictionary
        {
            public MyCustomDictionary Dictionary { get; set; }
        }

        [Fact]
        public static void Attribute_OnType_NotRecursive()
        {
            // Recursive behavior, where number handling setting on a property is applied to subsequent
            // properties in its type closure, would allow a string number. This is not supported.
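            // In other words, the attribute on AttributeAppliedToFirstLevelProp does not flow into
            // NonNumberType, so NonNumberType.MyInt still requires a JSON number.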
string json = @"{""NestedClass"":{""MyInt"":""1""}}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<AttributeAppliedToFirstLevelProp>(json)); var obj = new AttributeAppliedToFirstLevelProp { NestedClass = new NonNumberType { MyInt = 1 } }; Assert.Equal(@"{""NestedClass"":{""MyInt"":1}}", JsonSerializer.Serialize(obj)); } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public class AttributeAppliedToFirstLevelProp { public NonNumberType NestedClass { get; set; } } public class NonNumberType { public int MyInt { get; set; } } [Fact] public static void HandlingOnMemberOverridesHandlingOnType_Enumerable() { string json = @"{""List"":[""1""]}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<MyCustomListWrapper>(json)); var obj = new MyCustomListWrapper { List = new MyCustomList { 1 } }; Assert.Equal(@"{""List"":[1]}", JsonSerializer.Serialize(obj)); } public class MyCustomListWrapper { [JsonNumberHandling(JsonNumberHandling.Strict)] public MyCustomList List { get; set; } } [Fact] public static void HandlingOnMemberOverridesHandlingOnType_Dictionary() { string json = @"{""Dictionary"":{""Key"":""1""}}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<MyCustomDictionaryWrapper>(json)); var obj1 = new MyCustomDictionaryWrapper { Dictionary = new MyCustomDictionary { ["Key"] = 1 } }; Assert.Equal(@"{""Dictionary"":{""Key"":1}}", JsonSerializer.Serialize(obj1)); } public class MyCustomDictionaryWrapper { [JsonNumberHandling(JsonNumberHandling.Strict)] public MyCustomDictionary Dictionary { get; set; } } [Fact] public static void Attribute_Allowed_On_NonNumber_NonCollection_Property() { const string Json = @"{""MyProp"":{""MyInt"":1}}"; ClassWith_NumberHandlingOn_ObjectProperty obj = JsonSerializer.Deserialize<ClassWith_NumberHandlingOn_ObjectProperty>(Json); Assert.Equal(1, obj.MyProp.MyInt); string json = JsonSerializer.Serialize(obj); Assert.Equal(Json, json); } public class ClassWith_NumberHandlingOn_ObjectProperty { [JsonNumberHandling(JsonNumberHandling.Strict)] public NonNumberType MyProp { get; set; } } [Fact] public static void Attribute_Allowed_On_Property_WithCustomConverter() { string json = @"{""Prop"":1}"; // Converter returns 25 regardless of input. var obj = JsonSerializer.Deserialize<ClassWith_NumberHandlingOn_Property_WithCustomConverter>(json); Assert.Equal(25, obj.Prop); // Converter throws this exception regardless of input. NotImplementedException ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(obj)); Assert.Equal("Converter was called", ex.Message); } public class ClassWith_NumberHandlingOn_Property_WithCustomConverter { [JsonNumberHandling(JsonNumberHandling.Strict)] [JsonConverter(typeof(ConverterForInt32))] public int Prop { get; set; } } [Fact] public static void Attribute_Allowed_On_Type_WithCustomConverter() { string json = @"{}"; NotImplementedException ex; // Assert regular Read/Write methods on custom converter are called. 
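            // Both calls below are expected to surface the converter's NotImplementedException,
            // proving the custom converter (not the built-in number handling) was invoked.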
ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Deserialize<ClassWith_NumberHandlingOn_Type_WithCustomConverter>(json)); Assert.Equal("Converter was called", ex.Message); ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(new ClassWith_NumberHandlingOn_Type_WithCustomConverter())); Assert.Equal("Converter was called", ex.Message); } [JsonNumberHandling(JsonNumberHandling.Strict)] [JsonConverter(typeof(ConverterForMyType))] public class ClassWith_NumberHandlingOn_Type_WithCustomConverter { } private class ConverterForMyType : JsonConverter<ClassWith_NumberHandlingOn_Type_WithCustomConverter> { public override ClassWith_NumberHandlingOn_Type_WithCustomConverter Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { throw new NotImplementedException("Converter was called"); } public override void Write(Utf8JsonWriter writer, ClassWith_NumberHandlingOn_Type_WithCustomConverter value, JsonSerializerOptions options) { throw new NotImplementedException("Converter was called"); } } [Fact] public static void CustomConverterOverridesBuiltInLogic() { var options = new JsonSerializerOptions(s_optionReadAndWriteFromStr) { Converters = { new ConverterForInt32(), new ConverterForFloat() } }; string json = @"""32"""; // Converter returns 25 regardless of input. Assert.Equal(25, JsonSerializer.Deserialize<int>(json, options)); // Converter throws this exception regardless of input. NotImplementedException ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(4, options)); Assert.Equal("Converter was called", ex.Message); json = @"""NaN"""; // Converter returns 25 if NaN. Assert.Equal(25, JsonSerializer.Deserialize<float?>(json, options)); // Converter writes 25 if NaN. Assert.Equal("25", JsonSerializer.Serialize((float?)float.NaN, options)); } public class ConverterForFloat : JsonConverter<float?> { public override float? Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { if (reader.TokenType == JsonTokenType.String && reader.GetString() == "NaN") { return 25; } throw new NotSupportedException(); } public override void Write(Utf8JsonWriter writer, float? value, JsonSerializerOptions options) { if (float.IsNaN(value.Value)) { writer.WriteNumberValue(25); return; } throw new NotSupportedException(); } } [Fact] public static void JsonNumberHandling_ArgOutOfRangeFail() { // Global options ArgumentOutOfRangeException ex = Assert.Throws<ArgumentOutOfRangeException>( () => new JsonSerializerOptions { NumberHandling = (JsonNumberHandling)(-1) }); Assert.Contains("value", ex.ToString()); Assert.Throws<ArgumentOutOfRangeException>( () => new JsonSerializerOptions { NumberHandling = (JsonNumberHandling)(8) }); ex = Assert.Throws<ArgumentOutOfRangeException>( () => new JsonNumberHandlingAttribute((JsonNumberHandling)(-1))); Assert.Contains("handling", ex.ToString()); Assert.Throws<ArgumentOutOfRangeException>( () => new JsonNumberHandlingAttribute((JsonNumberHandling)(8))); } [Fact] public static void InternalCollectionConverter_CustomNumberConverter_GlobalOption() { NotImplementedException ex; var list = new List<int> { 1 }; var options = new JsonSerializerOptions(s_optionReadAndWriteFromStr) { Converters = { new ConverterForInt32() } }; // Assert converter methods are called and not Read/WriteWithNumberHandling (which would throw InvalidOperationException). // Converter returns 25 regardless of input. 
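            // ConverterForInt32 is registered globally in these options, so every int element read
            // below comes back as 25 regardless of the JSON payload.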
            Assert.Equal(25, JsonSerializer.Deserialize<List<int>>(@"[""1""]", options)[0]);

            // Converter throws this exception regardless of input.
            ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(list, options));
            Assert.Equal("Converter was called", ex.Message);

            var list2 = new List<int?> { 1 };
            Assert.Equal(25, JsonSerializer.Deserialize<List<int?>>(@"[""1""]", options)[0]);
            ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(list2, options));
            Assert.Equal("Converter was called", ex.Message);

            // It is okay to set number handling on a number-collection property when the number type itself
            // is handled by a custom converter; the converter's Read/Write methods are called.
            ClassWithListPropAndAttribute obj1 = JsonSerializer.Deserialize<ClassWithListPropAndAttribute>(@"{""Prop"":[""1""]}", options);
            Assert.Equal(25, obj1.Prop[0]);
            ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(obj1, options));
            Assert.Equal("Converter was called", ex.Message);

            ClassWithDictPropAndAttribute obj2 = JsonSerializer.Deserialize<ClassWithDictPropAndAttribute>(@"{""Prop"":{""1"":""1""}}", options);
            Assert.Equal(25, obj2.Prop[1]);
            ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(obj2, options));
            Assert.Equal("Converter was called", ex.Message);
        }

        private class ClassWithListPropAndAttribute
        {
            [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)]
            public List<int> Prop { get; set; }
        }

        private class ClassWithDictPropAndAttribute
        {
            [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)]
            public Dictionary<int, int?> Prop { get; set; }
        }

        [Fact]
        public static void InternalCollectionConverter_CustomNumberConverter_OnProperty()
        {
            // It is invalid to set number handling on a number-collection property when the collection
            // itself is handled by a custom converter.
var ex = Assert.Throws<InvalidOperationException>(() => JsonSerializer.Deserialize<ClassWithListPropAndAttribute_ConverterOnProp>("")); Assert.Contains(nameof(ClassWithListPropAndAttribute_ConverterOnProp), ex.ToString()); Assert.Contains("IntProp", ex.ToString()); ex = Assert.Throws<InvalidOperationException>(() => JsonSerializer.Serialize(new ClassWithListPropAndAttribute_ConverterOnProp())); Assert.Contains(nameof(ClassWithListPropAndAttribute_ConverterOnProp), ex.ToString()); Assert.Contains("IntProp", ex.ToString()); ex = Assert.Throws<InvalidOperationException>(() => JsonSerializer.Deserialize<ClassWithDictPropAndAttribute_ConverterOnProp>("")); Assert.Contains(nameof(ClassWithDictPropAndAttribute_ConverterOnProp), ex.ToString()); Assert.Contains("IntProp", ex.ToString()); ex = Assert.Throws<InvalidOperationException>(() => JsonSerializer.Serialize(new ClassWithDictPropAndAttribute_ConverterOnProp())); Assert.Contains(nameof(ClassWithDictPropAndAttribute_ConverterOnProp), ex.ToString()); Assert.Contains("IntProp", ex.ToString()); } private class ClassWithListPropAndAttribute_ConverterOnProp { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] [JsonConverter(typeof(ListOfIntConverter))] public List<int> IntProp { get; set; } } private class ClassWithDictPropAndAttribute_ConverterOnProp { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] [JsonConverter(typeof(ClassWithDictPropAndAttribute_ConverterOnProp))] public Dictionary<int, int?> IntProp { get; set; } } public class ListOfIntConverter : JsonConverter<List<int>> { public override List<int> Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) => throw new NotImplementedException(); public override void Write(Utf8JsonWriter writer, List<int> value, JsonSerializerOptions options) => throw new NotImplementedException(); } [Fact] public static void InternalCollectionConverter_CustomNullableNumberConverter() { NotImplementedException ex; var dict = new Dictionary<int, int?> { [1] = 1 }; var options = new JsonSerializerOptions(s_optionReadAndWriteFromStr) { Converters = { new ConverterForNullableInt32() } }; // Assert converter methods are called and not Read/WriteWithNumberHandling (which would throw InvalidOperationException). // Converter returns 25 regardless of input. Assert.Equal(25, JsonSerializer.Deserialize<Dictionary<int, int?>>(@"{""1"":""1""}", options)[1]); ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(dict, options)); Assert.Equal("Converter was called", ex.Message); var obj = JsonSerializer.Deserialize<ClassWithDictPropAndAttribute>(@"{""Prop"":{""1"":""1""}}", options); Assert.Equal(25, obj.Prop[1]); ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(obj, options)); Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(dict, options)); Assert.Equal("Converter was called", ex.Message); } public class ConverterForNullableInt32 : JsonConverter<int?> { public override int? Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { return 25; } public override void Write(Utf8JsonWriter writer, int? value, JsonSerializerOptions options) { throw new NotImplementedException("Converter was called"); } } /// <summary> /// Example of a custom converter that uses the options to determine behavior. 
/// </summary> [Fact] public static void AdaptableCustomConverter() { // Baseline without custom converter PlainClassWithList obj = new() { Prop = new List<int>() { 1 } }; string json = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); Assert.Equal("{\"Prop\":[\"1\"]}", json); obj = JsonSerializer.Deserialize<PlainClassWithList>(json, s_optionReadAndWriteFromStr); Assert.Equal(1, obj.Prop[0]); // First with numbers JsonSerializerOptions options = new() { Converters = { new AdaptableInt32Converter() } }; obj = new() { Prop = new List<int>() { 1 } }; json = JsonSerializer.Serialize(obj, options); Assert.Equal("{\"Prop\":[101]}", json); obj = JsonSerializer.Deserialize<PlainClassWithList>(json, options); Assert.Equal(1, obj.Prop[0]); // Then with strings options = new() { NumberHandling = JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString, Converters = { new AdaptableInt32Converter() } }; obj = new() { Prop = new List<int>() { 1 } }; json = JsonSerializer.Serialize(obj, options); Assert.Equal("{\"Prop\":[\"101\"]}", json); obj = JsonSerializer.Deserialize<PlainClassWithList>(json, options); Assert.Equal(1, obj.Prop[0]); } private class PlainClassWithList { public List<int> Prop { get; set; } } public class AdaptableInt32Converter : JsonConverter<int> { public override int Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { if ((JsonNumberHandling.AllowReadingFromString & options.NumberHandling) != 0) { // Assume it's a string; don't use TryParse(). return int.Parse(reader.GetString(), CultureInfo.InvariantCulture) - 100; } else { return reader.GetInt32() - 100; } } public override void Write(Utf8JsonWriter writer, int value, JsonSerializerOptions options) { if ((JsonNumberHandling.WriteAsString & options.NumberHandling) != 0) { writer.WriteStringValue((value + 100).ToString(CultureInfo.InvariantCulture)); } else { writer.WriteNumberValue(value + 100); } } } } public class NumberHandlingTests_AsyncStreamOverload : NumberHandlingTests_OverloadSpecific { public NumberHandlingTests_AsyncStreamOverload() : base(JsonSerializerWrapperForString.AsyncStreamSerializer) { } } public class NumberHandlingTests_SyncStreamOverload : NumberHandlingTests_OverloadSpecific { public NumberHandlingTests_SyncStreamOverload() : base(JsonSerializerWrapperForString.SyncStreamSerializer) { } } public class NumberHandlingTests_SyncOverload : NumberHandlingTests_OverloadSpecific { public NumberHandlingTests_SyncOverload() : base(JsonSerializerWrapperForString.StringSerializer) { } } public class NumberHandlingTests_Document : NumberHandlingTests_OverloadSpecific { public NumberHandlingTests_Document() : base(JsonSerializerWrapperForString.DocumentSerializer) { } } public class NumberHandlingTests_Element : NumberHandlingTests_OverloadSpecific { public NumberHandlingTests_Element() : base(JsonSerializerWrapperForString.ElementSerializer) { } } public class NumberHandlingTests_Node : NumberHandlingTests_OverloadSpecific { public NumberHandlingTests_Node() : base(JsonSerializerWrapperForString.NodeSerializer) { } } public abstract class NumberHandlingTests_OverloadSpecific { private JsonSerializerWrapperForString Deserializer { get; } public NumberHandlingTests_OverloadSpecific(JsonSerializerWrapperForString deserializer) { Deserializer = deserializer; } [Theory] [MemberData(nameof(NumberHandling_ForPropsReadAfter_DeserializingCtorParams_TestData))] public async Task NumberHandling_ForPropsReadAfter_DeserializingCtorParams(string json) { 
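            // The Album type below is deserialized through its parameterized constructor; this test
            // verifies that string-to-number handling still applies to properties (userPlayCount)
            // that are read after the constructor arguments have been consumed.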
JsonSerializerOptions options = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString, PropertyNamingPolicy = JsonNamingPolicy.CamelCase, }; Result result = await Deserializer.DeserializeWrapper<Result>(json, options); JsonTestHelper.AssertJsonEqual(json, JsonSerializer.Serialize(result, options)); } public static IEnumerable<object[]> NumberHandling_ForPropsReadAfter_DeserializingCtorParams_TestData() { yield return new object[] { @"{ ""album"": { ""userPlayCount"": ""123"", ""name"": ""the name of the album"", ""artist"": ""the name of the artist"", ""wiki"": { ""summary"": ""a summary of the album"" } } }" }; yield return new object[] { @"{ ""album"": { ""name"": ""the name of the album"", ""userPlayCount"": ""123"", ""artist"": ""the name of the artist"", ""wiki"": { ""summary"": ""a summary of the album"" } } }" }; yield return new object[] { @"{ ""album"": { ""name"": ""the name of the album"", ""artist"": ""the name of the artist"", ""userPlayCount"": ""123"", ""wiki"": { ""summary"": ""a summary of the album"" } } }" }; yield return new object[] { @"{ ""album"": { ""name"": ""the name of the album"", ""artist"": ""the name of the artist"", ""wiki"": { ""summary"": ""a summary of the album"" }, ""userPlayCount"": ""123"" } }" }; } public class Result { public Album Album { get; init; } } public class Album { public Album(string name, string artist) { Name = name; Artist = artist; } public string Name { get; init; } public string Artist { get; init; } public long? userPlayCount { get; init; } [JsonExtensionData] public Dictionary<string, JsonElement> ExtensionData { get; set; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Concurrent; using System.Collections.Generic; using System.Collections.Immutable; using System.Collections.ObjectModel; using System.Globalization; using System.IO; using System.Linq; using System.Text.Encodings.Web; using System.Text.Json.Tests; using System.Threading.Tasks; using Xunit; namespace System.Text.Json.Serialization.Tests { public static partial class NumberHandlingTests { private static readonly JsonSerializerOptions s_optionReadFromStr = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.AllowReadingFromString }; private static readonly JsonSerializerOptions s_optionWriteAsStr = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.WriteAsString }; private static readonly JsonSerializerOptions s_optionReadAndWriteFromStr = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString }; private static readonly JsonSerializerOptions s_optionsAllowFloatConstants = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.AllowNamedFloatingPointLiterals }; private static readonly JsonSerializerOptions s_optionReadFromStrAllowFloatConstants = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.AllowNamedFloatingPointLiterals }; private static readonly JsonSerializerOptions s_optionWriteAsStrAllowFloatConstants = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.WriteAsString | JsonNumberHandling.AllowNamedFloatingPointLiterals }; [Fact] public static void Number_AsRootType_RoundTrip() { RunAsRootTypeTest(JsonNumberTestData.Bytes); RunAsRootTypeTest(JsonNumberTestData.SBytes); RunAsRootTypeTest(JsonNumberTestData.Shorts); RunAsRootTypeTest(JsonNumberTestData.Ints); RunAsRootTypeTest(JsonNumberTestData.Longs); RunAsRootTypeTest(JsonNumberTestData.UShorts); RunAsRootTypeTest(JsonNumberTestData.UInts); RunAsRootTypeTest(JsonNumberTestData.ULongs); RunAsRootTypeTest(JsonNumberTestData.Floats); RunAsRootTypeTest(JsonNumberTestData.Doubles); RunAsRootTypeTest(JsonNumberTestData.Decimals); RunAsRootTypeTest(JsonNumberTestData.NullableBytes); RunAsRootTypeTest(JsonNumberTestData.NullableSBytes); RunAsRootTypeTest(JsonNumberTestData.NullableShorts); RunAsRootTypeTest(JsonNumberTestData.NullableInts); RunAsRootTypeTest(JsonNumberTestData.NullableLongs); RunAsRootTypeTest(JsonNumberTestData.NullableUShorts); RunAsRootTypeTest(JsonNumberTestData.NullableUInts); RunAsRootTypeTest(JsonNumberTestData.NullableULongs); RunAsRootTypeTest(JsonNumberTestData.NullableFloats); RunAsRootTypeTest(JsonNumberTestData.NullableDoubles); RunAsRootTypeTest(JsonNumberTestData.NullableDecimals); } private static void RunAsRootTypeTest<T>(List<T> numbers) { foreach (T number in numbers) { string numberAsString = GetNumberAsString(number); string json = $"{numberAsString}"; string jsonWithNumberAsString = @$"""{numberAsString}"""; PerformAsRootTypeSerialization(number, json, jsonWithNumberAsString); } } private static string GetNumberAsString<T>(T number) { return number switch { double @double => @double.ToString(JsonTestHelper.DoubleFormatString, CultureInfo.InvariantCulture), float @float => @float.ToString(JsonTestHelper.SingleFormatString, CultureInfo.InvariantCulture), decimal @decimal => @decimal.ToString(CultureInfo.InvariantCulture), _ => number.ToString() }; } 
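        // Exercises a single number against all three handling options (read-from-string,
        // write-as-string, and both), checking both the accepted inputs and the produced output.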
private static void PerformAsRootTypeSerialization<T>(T number, string jsonWithNumberAsNumber, string jsonWithNumberAsString) { // Option: read from string // Deserialize Assert.Equal(number, JsonSerializer.Deserialize<T>(jsonWithNumberAsNumber, s_optionReadFromStr)); Assert.Equal(number, JsonSerializer.Deserialize<T>(jsonWithNumberAsString, s_optionReadFromStr)); // Serialize Assert.Equal(jsonWithNumberAsNumber, JsonSerializer.Serialize(number, s_optionReadFromStr)); // Option: write as string // Deserialize Assert.Equal(number, JsonSerializer.Deserialize<T>(jsonWithNumberAsNumber, s_optionWriteAsStr)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<T>(jsonWithNumberAsString, s_optionWriteAsStr)); // Serialize Assert.Equal(jsonWithNumberAsString, JsonSerializer.Serialize(number, s_optionWriteAsStr)); // Option: read and write from/to string // Deserialize Assert.Equal(number, JsonSerializer.Deserialize<T>(jsonWithNumberAsNumber, s_optionReadAndWriteFromStr)); Assert.Equal(number, JsonSerializer.Deserialize<T>(jsonWithNumberAsString, s_optionReadAndWriteFromStr)); // Serialize Assert.Equal(jsonWithNumberAsString, JsonSerializer.Serialize(number, s_optionReadAndWriteFromStr)); } [Fact] public static void Number_AsBoxed_RootType() { string numberAsString = @"""2"""; int @int = 2; float @float = 2; int? nullableInt = 2; float? nullableFloat = 2; Assert.Equal(numberAsString, JsonSerializer.Serialize((object)@int, s_optionReadAndWriteFromStr)); Assert.Equal(numberAsString, JsonSerializer.Serialize((object)@float, s_optionReadAndWriteFromStr)); Assert.Equal(numberAsString, JsonSerializer.Serialize((object)nullableInt, s_optionReadAndWriteFromStr)); Assert.Equal(numberAsString, JsonSerializer.Serialize((object)nullableFloat, s_optionReadAndWriteFromStr)); Assert.Equal(2, (int)JsonSerializer.Deserialize(numberAsString, typeof(int), s_optionReadAndWriteFromStr)); Assert.Equal(2, (float)JsonSerializer.Deserialize(numberAsString, typeof(float), s_optionReadAndWriteFromStr)); Assert.Equal(2, (int?)JsonSerializer.Deserialize(numberAsString, typeof(int?), s_optionReadAndWriteFromStr)); Assert.Equal(2, (float?)JsonSerializer.Deserialize(numberAsString, typeof(float?), s_optionReadAndWriteFromStr)); } [Fact] public static void Number_AsBoxed_Property() { int @int = 1; float? nullableFloat = 2; string expected = @"{""MyInt"":""1"",""MyNullableFloat"":""2""}"; var obj = new Class_With_BoxedNumbers { MyInt = @int, MyNullableFloat = nullableFloat }; string serialized = JsonSerializer.Serialize(obj); JsonTestHelper.AssertJsonEqual(expected, serialized); obj = JsonSerializer.Deserialize<Class_With_BoxedNumbers>(serialized); JsonElement el = Assert.IsType<JsonElement>(obj.MyInt); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal("1", el.GetString()); el = Assert.IsType<JsonElement>(obj.MyNullableFloat); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal("2", el.GetString()); } public class Class_With_BoxedNumbers { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public object MyInt { get; set; } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public object MyNullableFloat { get; set; } } [Fact] public static void Number_AsBoxed_CollectionRootType_Element() { int @int = 1; float? 
nullableFloat = 2; string expected = @"[""1""]"; var obj = new List<object> { @int }; string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); Assert.Equal(expected, serialized); obj = JsonSerializer.Deserialize<List<object>>(serialized, s_optionReadAndWriteFromStr); JsonElement el = Assert.IsType<JsonElement>(obj[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal("1", el.GetString()); expected = @"[""2""]"; IList obj2 = new object[] { nullableFloat }; serialized = JsonSerializer.Serialize(obj2, s_optionReadAndWriteFromStr); Assert.Equal(expected, serialized); obj2 = JsonSerializer.Deserialize<IList>(serialized, s_optionReadAndWriteFromStr); el = Assert.IsType<JsonElement>(obj2[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal("2", el.GetString()); } [Fact] public static void Number_AsBoxed_CollectionProperty_Element() { int @int = 2; float? nullableFloat = 2; string expected = @"{""MyInts"":[""2""],""MyNullableFloats"":[""2""]}"; var obj = new Class_With_ListsOfBoxedNumbers { MyInts = new List<object> { @int }, MyNullableFloats = new object[] { nullableFloat } }; string serialized = JsonSerializer.Serialize(obj); JsonTestHelper.AssertJsonEqual(expected, serialized); obj = JsonSerializer.Deserialize<Class_With_ListsOfBoxedNumbers>(serialized); JsonElement el = Assert.IsType<JsonElement>(obj.MyInts[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal("2", el.GetString()); el = Assert.IsType<JsonElement>(obj.MyNullableFloats[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal("2", el.GetString()); } public class Class_With_ListsOfBoxedNumbers { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public List<object> MyInts { get; set; } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public IList MyNullableFloats { get; set; } } [Fact] public static void NonNumber_AsBoxed_Property() { DateTime dateTime = DateTime.Now; Guid? nullableGuid = Guid.NewGuid(); string expected = @$"{{""MyDateTime"":{JsonSerializer.Serialize(dateTime)},""MyNullableGuid"":{JsonSerializer.Serialize(nullableGuid)}}}"; var obj = new Class_With_BoxedNonNumbers { MyDateTime = dateTime, MyNullableGuid = nullableGuid }; string serialized = JsonSerializer.Serialize(obj); JsonTestHelper.AssertJsonEqual(expected, serialized); obj = JsonSerializer.Deserialize<Class_With_BoxedNonNumbers>(serialized); JsonElement el = Assert.IsType<JsonElement>(obj.MyDateTime); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal(dateTime, el.GetDateTime()); el = Assert.IsType<JsonElement>(obj.MyNullableGuid); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal(nullableGuid.Value, el.GetGuid()); } public class Class_With_BoxedNonNumbers { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public object MyDateTime { get; set; } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public object MyNullableGuid { get; set; } } [Fact] public static void NonNumber_AsBoxed_CollectionRootType_Element() { DateTime dateTime = DateTime.Now; Guid? 
nullableGuid = Guid.NewGuid(); string expected = @$"[{JsonSerializer.Serialize(dateTime)}]"; var obj = new List<object> { dateTime }; string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); Assert.Equal(expected, serialized); obj = JsonSerializer.Deserialize<List<object>>(serialized, s_optionReadAndWriteFromStr); JsonElement el = Assert.IsType<JsonElement>(obj[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal(dateTime, el.GetDateTime()); expected = @$"[{JsonSerializer.Serialize(nullableGuid)}]"; IList obj2 = new object[] { nullableGuid }; serialized = JsonSerializer.Serialize(obj2, s_optionReadAndWriteFromStr); Assert.Equal(expected, serialized); obj2 = JsonSerializer.Deserialize<IList>(serialized, s_optionReadAndWriteFromStr); el = Assert.IsType<JsonElement>(obj2[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal(nullableGuid.Value, el.GetGuid()); } [Fact] public static void NonNumber_AsBoxed_CollectionProperty_Element() { DateTime dateTime = DateTime.Now; Guid? nullableGuid = Guid.NewGuid(); string expected = @$"{{""MyDateTimes"":[{JsonSerializer.Serialize(dateTime)}],""MyNullableGuids"":[{JsonSerializer.Serialize(nullableGuid)}]}}"; var obj = new Class_With_ListsOfBoxedNonNumbers { MyDateTimes = new List<object> { dateTime }, MyNullableGuids = new object[] { nullableGuid } }; string serialized = JsonSerializer.Serialize(obj); JsonTestHelper.AssertJsonEqual(expected, serialized); obj = JsonSerializer.Deserialize<Class_With_ListsOfBoxedNonNumbers>(serialized); JsonElement el = Assert.IsType<JsonElement>(obj.MyDateTimes[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal(dateTime, el.GetDateTime()); el = Assert.IsType<JsonElement>(obj.MyNullableGuids[0]); Assert.Equal(JsonValueKind.String, el.ValueKind); Assert.Equal(nullableGuid, el.GetGuid()); } public class Class_With_ListsOfBoxedNonNumbers { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public List<object> MyDateTimes { get; set; } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public IList MyNullableGuids { get; set; } } [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/49936", TestPlatforms.Android)] public static void Number_AsCollectionElement_RoundTrip() { RunAsCollectionElementTest(JsonNumberTestData.Bytes); RunAsCollectionElementTest(JsonNumberTestData.SBytes); RunAsCollectionElementTest(JsonNumberTestData.Shorts); RunAsCollectionElementTest(JsonNumberTestData.Ints); RunAsCollectionElementTest(JsonNumberTestData.Longs); RunAsCollectionElementTest(JsonNumberTestData.UShorts); RunAsCollectionElementTest(JsonNumberTestData.UInts); RunAsCollectionElementTest(JsonNumberTestData.ULongs); RunAsCollectionElementTest(JsonNumberTestData.Floats); RunAsCollectionElementTest(JsonNumberTestData.Doubles); RunAsCollectionElementTest(JsonNumberTestData.Decimals); RunAsCollectionElementTest(JsonNumberTestData.NullableBytes); RunAsCollectionElementTest(JsonNumberTestData.NullableSBytes); RunAsCollectionElementTest(JsonNumberTestData.NullableShorts); RunAsCollectionElementTest(JsonNumberTestData.NullableInts); RunAsCollectionElementTest(JsonNumberTestData.NullableLongs); RunAsCollectionElementTest(JsonNumberTestData.NullableUShorts); RunAsCollectionElementTest(JsonNumberTestData.NullableUInts); RunAsCollectionElementTest(JsonNumberTestData.NullableULongs); RunAsCollectionElementTest(JsonNumberTestData.NullableFloats); 
RunAsCollectionElementTest(JsonNumberTestData.NullableDoubles); RunAsCollectionElementTest(JsonNumberTestData.NullableDecimals); } private static void RunAsCollectionElementTest<T>(List<T> numbers) { StringBuilder jsonBuilder_NumbersAsNumbers = new StringBuilder(); StringBuilder jsonBuilder_NumbersAsStrings = new StringBuilder(); StringBuilder jsonBuilder_NumbersAsNumbersAndStrings = new StringBuilder(); StringBuilder jsonBuilder_NumbersAsNumbersAndStrings_Alternate = new StringBuilder(); bool asNumber = false; jsonBuilder_NumbersAsNumbers.Append("["); jsonBuilder_NumbersAsStrings.Append("["); jsonBuilder_NumbersAsNumbersAndStrings.Append("["); jsonBuilder_NumbersAsNumbersAndStrings_Alternate.Append("["); foreach (T number in numbers) { string numberAsString = GetNumberAsString(number); string jsonWithNumberAsString = @$"""{numberAsString}"""; jsonBuilder_NumbersAsNumbers.Append($"{numberAsString},"); jsonBuilder_NumbersAsStrings.Append($"{jsonWithNumberAsString},"); jsonBuilder_NumbersAsNumbersAndStrings.Append(asNumber ? $"{numberAsString}," : $"{jsonWithNumberAsString},"); jsonBuilder_NumbersAsNumbersAndStrings_Alternate.Append(!asNumber ? $"{numberAsString}," : $"{jsonWithNumberAsString},"); asNumber = !asNumber; } jsonBuilder_NumbersAsNumbers.Remove(jsonBuilder_NumbersAsNumbers.Length - 1, 1); jsonBuilder_NumbersAsStrings.Remove(jsonBuilder_NumbersAsStrings.Length - 1, 1); jsonBuilder_NumbersAsNumbersAndStrings.Remove(jsonBuilder_NumbersAsNumbersAndStrings.Length - 1, 1); jsonBuilder_NumbersAsNumbersAndStrings_Alternate.Remove(jsonBuilder_NumbersAsNumbersAndStrings_Alternate.Length - 1, 1); jsonBuilder_NumbersAsNumbers.Append("]"); jsonBuilder_NumbersAsStrings.Append("]"); jsonBuilder_NumbersAsNumbersAndStrings.Append("]"); jsonBuilder_NumbersAsNumbersAndStrings_Alternate.Append("]"); string jsonNumbersAsStrings = jsonBuilder_NumbersAsStrings.ToString(); PerformAsCollectionElementSerialization( numbers, jsonBuilder_NumbersAsNumbers.ToString(), jsonNumbersAsStrings, jsonBuilder_NumbersAsNumbersAndStrings.ToString(), jsonBuilder_NumbersAsNumbersAndStrings_Alternate.ToString()); // Reflection based tests for every collection type. 
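            // (RunAllCollectionsRoundTripTest round-trips the same string-formatted payload through
            // each supported enumerable type via the non-generic Deserialize(json, type) overload.)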
RunAllCollectionsRoundTripTest<T>(jsonNumbersAsStrings); } private static void PerformAsCollectionElementSerialization<T>( List<T> numbers, string json_NumbersAsNumbers, string json_NumbersAsStrings, string json_NumbersAsNumbersAndStrings, string json_NumbersAsNumbersAndStrings_Alternate) { List<T> deserialized; // Option: read from string // Deserialize deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbers, s_optionReadFromStr); AssertIEnumerableEqual(numbers, deserialized); deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsStrings, s_optionReadFromStr); AssertIEnumerableEqual(numbers, deserialized); deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbersAndStrings, s_optionReadFromStr); AssertIEnumerableEqual(numbers, deserialized); deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbersAndStrings_Alternate, s_optionReadFromStr); AssertIEnumerableEqual(numbers, deserialized); // Serialize Assert.Equal(json_NumbersAsNumbers, JsonSerializer.Serialize(numbers, s_optionReadFromStr)); // Option: write as string // Deserialize deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbers, s_optionWriteAsStr); AssertIEnumerableEqual(numbers, deserialized); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<List<T>>(json_NumbersAsStrings, s_optionWriteAsStr)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbersAndStrings, s_optionWriteAsStr)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbersAndStrings_Alternate, s_optionWriteAsStr)); // Serialize Assert.Equal(json_NumbersAsStrings, JsonSerializer.Serialize(numbers, s_optionWriteAsStr)); // Option: read and write from/to string // Deserialize deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbers, s_optionReadAndWriteFromStr); AssertIEnumerableEqual(numbers, deserialized); deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsStrings, s_optionReadAndWriteFromStr); AssertIEnumerableEqual(numbers, deserialized); deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbersAndStrings, s_optionReadAndWriteFromStr); AssertIEnumerableEqual(numbers, deserialized); deserialized = JsonSerializer.Deserialize<List<T>>(json_NumbersAsNumbersAndStrings_Alternate, s_optionReadAndWriteFromStr); AssertIEnumerableEqual(numbers, deserialized); // Serialize Assert.Equal(json_NumbersAsStrings, JsonSerializer.Serialize(numbers, s_optionReadAndWriteFromStr)); } private static void AssertIEnumerableEqual<T>(IEnumerable<T> list1, IEnumerable<T> list2) { IEnumerator<T> enumerator1 = list1.GetEnumerator(); IEnumerator<T> enumerator2 = list2.GetEnumerator(); while (enumerator1.MoveNext()) { enumerator2.MoveNext(); Assert.Equal(enumerator1.Current, enumerator2.Current); } Assert.False(enumerator2.MoveNext()); } private static void RunAllCollectionsRoundTripTest<T>(string json) { foreach (Type type in CollectionTestTypes.DeserializableGenericEnumerableTypes<T>()) { if (type.IsGenericType && type.GetGenericTypeDefinition() == typeof(HashSet<>)) { HashSet<T> obj1 = (HashSet<T>)JsonSerializer.Deserialize(json, type, s_optionReadAndWriteFromStr); string serialized = JsonSerializer.Serialize(obj1, s_optionReadAndWriteFromStr); HashSet<T> obj2 = (HashSet<T>)JsonSerializer.Deserialize(serialized, type, s_optionReadAndWriteFromStr); Assert.Equal(obj1.Count, obj2.Count); foreach (T element in obj1) { Assert.True(obj2.Contains(element)); } } else if (type != typeof(byte[])) 
{ object obj = JsonSerializer.Deserialize(json, type, s_optionReadAndWriteFromStr); string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); Assert.Equal(json, serialized); } } foreach (Type type in CollectionTestTypes.DeserializableNonGenericEnumerableTypes()) { // Deserialized as collection of JsonElements. object obj = JsonSerializer.Deserialize(json, type, s_optionReadAndWriteFromStr); // Serialized as strings with escaping. string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); // Ensure escaped values were serialized accurately List<T> list = JsonSerializer.Deserialize<List<T>>(serialized, s_optionReadAndWriteFromStr); serialized = JsonSerializer.Serialize(list, s_optionReadAndWriteFromStr); Assert.Equal(json, serialized); // Serialize instance which is a collection of numbers (not JsonElements). obj = Activator.CreateInstance(type, new[] { list }); serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); Assert.Equal(json, serialized); } } [Fact] public static void Number_AsDictionaryElement_RoundTrip() { var dict = new Dictionary<int, float>(); for (int i = 0; i < 10; i++) { dict[JsonNumberTestData.Ints[i]] = JsonNumberTestData.Floats[i]; } // Serialize string serialized = JsonSerializer.Serialize(dict, s_optionReadAndWriteFromStr); AssertDictionaryElements_StringValues(serialized); // Deserialize dict = JsonSerializer.Deserialize<Dictionary<int, float>>(serialized, s_optionReadAndWriteFromStr); // Test roundtrip JsonTestHelper.AssertJsonEqual(serialized, JsonSerializer.Serialize(dict, s_optionReadAndWriteFromStr)); } private static void AssertDictionaryElements_StringValues(string serialized) { var reader = new Utf8JsonReader(Encoding.UTF8.GetBytes(serialized)); reader.Read(); while (reader.Read()) { if (reader.TokenType == JsonTokenType.EndObject) { break; } else if (reader.TokenType == JsonTokenType.String) { Assert.True(reader.ValueSpan.IndexOf((byte)'\\') == -1); } else { Assert.Equal(JsonTokenType.PropertyName, reader.TokenType); } } } [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/39674", typeof(PlatformDetection), nameof(PlatformDetection.IsMonoInterpreter))] [SkipOnCoreClr("https://github.com/dotnet/runtime/issues/45464", ~RuntimeConfiguration.Release)] public static void DictionariesRoundTrip() { RunAllDictionariessRoundTripTest(JsonNumberTestData.ULongs); RunAllDictionariessRoundTripTest(JsonNumberTestData.Floats); RunAllDictionariessRoundTripTest(JsonNumberTestData.Doubles); } private static void RunAllDictionariessRoundTripTest<T>(List<T> numbers) { StringBuilder jsonBuilder_NumbersAsStrings = new StringBuilder(); jsonBuilder_NumbersAsStrings.Append("{"); foreach (T number in numbers) { string numberAsString = GetNumberAsString(number); string jsonWithNumberAsString = @$"""{numberAsString}"""; jsonBuilder_NumbersAsStrings.Append($"{jsonWithNumberAsString}:"); jsonBuilder_NumbersAsStrings.Append($"{jsonWithNumberAsString},"); } jsonBuilder_NumbersAsStrings.Remove(jsonBuilder_NumbersAsStrings.Length - 1, 1); jsonBuilder_NumbersAsStrings.Append("}"); string jsonNumbersAsStrings = jsonBuilder_NumbersAsStrings.ToString(); foreach (Type type in CollectionTestTypes.DeserializableDictionaryTypes<string, T>()) { object obj = JsonSerializer.Deserialize(jsonNumbersAsStrings, type, s_optionReadAndWriteFromStr); JsonTestHelper.AssertJsonEqual(jsonNumbersAsStrings, JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr)); } foreach (Type type in 
CollectionTestTypes.DeserializableNonGenericDictionaryTypes()) { Dictionary<T, T> dict = JsonSerializer.Deserialize<Dictionary<T, T>>(jsonNumbersAsStrings, s_optionReadAndWriteFromStr); // Serialize instance which is a dictionary of numbers (not JsonElements). object obj = Activator.CreateInstance(type, new[] { dict }); string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); JsonTestHelper.AssertJsonEqual(jsonNumbersAsStrings, serialized); } } [Fact] public static void Number_AsPropertyValue_RoundTrip() { var obj = new Class_With_NullableUInt64_And_Float() { NullableUInt64Number = JsonNumberTestData.NullableULongs.LastOrDefault(), FloatNumbers = JsonNumberTestData.Floats }; // Serialize string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); // Deserialize obj = JsonSerializer.Deserialize<Class_With_NullableUInt64_And_Float>(serialized, s_optionReadAndWriteFromStr); // Test roundtrip JsonTestHelper.AssertJsonEqual(serialized, JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr)); } private class Class_With_NullableUInt64_And_Float { public ulong? NullableUInt64Number { get; set; } [JsonInclude] public List<float> FloatNumbers; } [Fact] public static void Number_AsKeyValuePairValue_RoundTrip() { var obj = new KeyValuePair<ulong?, List<float>>(JsonNumberTestData.NullableULongs.LastOrDefault(), JsonNumberTestData.Floats); // Serialize string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); // Deserialize obj = JsonSerializer.Deserialize<KeyValuePair<ulong?, List<float>>>(serialized, s_optionReadAndWriteFromStr); // Test roundtrip JsonTestHelper.AssertJsonEqual(serialized, JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr)); } [Fact] public static void Number_AsObjectWithParameterizedCtor_RoundTrip() { var obj = new MyClassWithNumbers(JsonNumberTestData.NullableULongs.LastOrDefault(), JsonNumberTestData.Floats); // Serialize string serialized = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); // Deserialize obj = JsonSerializer.Deserialize<MyClassWithNumbers>(serialized, s_optionReadAndWriteFromStr); // Test roundtrip JsonTestHelper.AssertJsonEqual(serialized, JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr)); } private class MyClassWithNumbers { public ulong? Ulong { get; } public List<float> ListOfFloats { get; } public MyClassWithNumbers(ulong? @ulong, List<float> listOfFloats) { Ulong = @ulong; ListOfFloats = listOfFloats; } } [Fact] public static void Number_AsObjectWithParameterizedCtor_PropHasAttribute() { string json = @"{""ListOfFloats"":[""1""]}"; // Strict handling on property overrides loose global policy. Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<MyClassWithNumbers_PropsHasAttribute>(json, s_optionReadFromStr)); // Serialize json = @"{""ListOfFloats"":[1]}"; MyClassWithNumbers_PropsHasAttribute obj = JsonSerializer.Deserialize<MyClassWithNumbers_PropsHasAttribute>(json); // Number serialized as JSON number due to strict handling on property which overrides loose global policy. 
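            // (json here is {"ListOfFloats":[1]}, so the assert below confirms the list element is
            // written as a plain number, not a quoted string.)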
Assert.Equal(json, JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr)); } private class MyClassWithNumbers_PropsHasAttribute { [JsonNumberHandling(JsonNumberHandling.Strict)] public List<float> ListOfFloats { get; } public MyClassWithNumbers_PropsHasAttribute(List<float> listOfFloats) { ListOfFloats = listOfFloats; } } [Fact] public static void FloatingPointConstants_Pass() { // Valid values PerformFloatingPointSerialization("NaN"); PerformFloatingPointSerialization("Infinity"); PerformFloatingPointSerialization("-Infinity"); PerformFloatingPointSerialization("\u004EaN"); // NaN PerformFloatingPointSerialization("Inf\u0069ni\u0074y"); // Infinity PerformFloatingPointSerialization("\u002DInf\u0069nity"); // -Infinity static void PerformFloatingPointSerialization(string testString) { string testStringAsJson = $@"""{testString}"""; string testJson = @$"{{""FloatNumber"":{testStringAsJson},""DoubleNumber"":{testStringAsJson}}}"; StructWithNumbers obj; switch (testString) { case "NaN": obj = JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionsAllowFloatConstants); Assert.Equal(float.NaN, obj.FloatNumber); Assert.Equal(double.NaN, obj.DoubleNumber); obj = JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionReadFromStr); Assert.Equal(float.NaN, obj.FloatNumber); Assert.Equal(double.NaN, obj.DoubleNumber); break; case "Infinity": obj = JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionsAllowFloatConstants); Assert.Equal(float.PositiveInfinity, obj.FloatNumber); Assert.Equal(double.PositiveInfinity, obj.DoubleNumber); obj = JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionReadFromStr); Assert.Equal(float.PositiveInfinity, obj.FloatNumber); Assert.Equal(double.PositiveInfinity, obj.DoubleNumber); break; case "-Infinity": obj = JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionsAllowFloatConstants); Assert.Equal(float.NegativeInfinity, obj.FloatNumber); Assert.Equal(double.NegativeInfinity, obj.DoubleNumber); obj = JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionReadFromStr); Assert.Equal(float.NegativeInfinity, obj.FloatNumber); Assert.Equal(double.NegativeInfinity, obj.DoubleNumber); break; default: Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionsAllowFloatConstants)); return; } JsonTestHelper.AssertJsonEqual(testJson, JsonSerializer.Serialize(obj, s_optionsAllowFloatConstants)); JsonTestHelper.AssertJsonEqual(testJson, JsonSerializer.Serialize(obj, s_optionWriteAsStr)); } } [Theory] [InlineData("naN")] [InlineData("Nan")] [InlineData("NAN")] [InlineData("+Infinity")] [InlineData("+infinity")] [InlineData("infinity")] [InlineData("infinitY")] [InlineData("INFINITY")] [InlineData("+INFINITY")] [InlineData("-infinity")] [InlineData("-infinitY")] [InlineData("-INFINITY")] [InlineData(" NaN")] [InlineData("NaN ")] [InlineData(" Infinity")] [InlineData(" -Infinity")] [InlineData("Infinity ")] [InlineData("-Infinity ")] [InlineData("a-Infinity")] [InlineData("NaNa")] [InlineData("Infinitya")] [InlineData("-Infinitya")] #pragma warning disable xUnit1025 // Theory method 'FloatingPointConstants_Fail' on test class 'NumberHandlingTests' has InlineData duplicate(s) [InlineData("\u006EaN")] // "naN" [InlineData("\u0020Inf\u0069ni\u0074y")] // " Infinity" [InlineData("\u002BInf\u0069nity")] // "+Infinity" #pragma warning restore xUnit1025 public static void FloatingPointConstants_Fail(string testString) { string testStringAsJson = $@"""{testString}"""; string testJson 
= @$"{{""FloatNumber"":{testStringAsJson}}}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionReadFromStr)); testJson = @$"{{""DoubleNumber"":{testStringAsJson}}}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<StructWithNumbers>(testJson, s_optionReadFromStr)); } [Fact] public static void AllowFloatingPointConstants_WriteAsNumber_IfNotConstant() { float @float = 1; // Not written as "1" Assert.Equal("1", JsonSerializer.Serialize(@float, s_optionsAllowFloatConstants)); double @double = 1; // Not written as "1" Assert.Equal("1", JsonSerializer.Serialize(@double, s_optionsAllowFloatConstants)); } [Theory] [InlineData("NaN")] [InlineData("Infinity")] [InlineData("-Infinity")] public static void Unquoted_FloatingPointConstants_Read_Fail(string testString) { Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<float>(testString, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<double?>(testString, s_optionReadFromStr)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<double>(testString, s_optionReadFromStrAllowFloatConstants)); } private struct StructWithNumbers { public float FloatNumber { get; set; } public double DoubleNumber { get; set; } } [Fact] public static void ReadFromString_AllowFloatingPoint() { string json = @"{""IntNumber"":""1"",""FloatNumber"":""NaN""}"; ClassWithNumbers obj = JsonSerializer.Deserialize<ClassWithNumbers>(json, s_optionReadFromStrAllowFloatConstants); Assert.Equal(1, obj.IntNumber); Assert.Equal(float.NaN, obj.FloatNumber); JsonTestHelper.AssertJsonEqual(@"{""IntNumber"":1,""FloatNumber"":""NaN""}", JsonSerializer.Serialize(obj, s_optionReadFromStrAllowFloatConstants)); } [Fact] public static void WriteAsString_AllowFloatingPoint() { string json = @"{""IntNumber"":""1"",""FloatNumber"":""NaN""}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWithNumbers>(json, s_optionWriteAsStrAllowFloatConstants)); var obj = new ClassWithNumbers { IntNumber = 1, FloatNumber = float.NaN }; JsonTestHelper.AssertJsonEqual(json, JsonSerializer.Serialize(obj, s_optionWriteAsStrAllowFloatConstants)); } public class ClassWithNumbers { public int IntNumber { get; set; } public float FloatNumber { get; set; } } [Fact] public static void FloatingPointConstants_IncompatibleNumber() { AssertFloatingPointIncompatible_Fails<byte>(); AssertFloatingPointIncompatible_Fails<sbyte>(); AssertFloatingPointIncompatible_Fails<short>(); AssertFloatingPointIncompatible_Fails<int>(); AssertFloatingPointIncompatible_Fails<long>(); AssertFloatingPointIncompatible_Fails<ushort>(); AssertFloatingPointIncompatible_Fails<uint>(); AssertFloatingPointIncompatible_Fails<ulong>(); AssertFloatingPointIncompatible_Fails<decimal>(); AssertFloatingPointIncompatible_Fails<byte?>(); AssertFloatingPointIncompatible_Fails<sbyte?>(); AssertFloatingPointIncompatible_Fails<short?>(); AssertFloatingPointIncompatible_Fails<int?>(); AssertFloatingPointIncompatible_Fails<long?>(); AssertFloatingPointIncompatible_Fails<ushort?>(); AssertFloatingPointIncompatible_Fails<uint?>(); AssertFloatingPointIncompatible_Fails<ulong?>(); AssertFloatingPointIncompatible_Fails<decimal?>(); } private static void 
AssertFloatingPointIncompatible_Fails<T>() { string[] testCases = new[] { @"""NaN""", @"""Infinity""", @"""-Infinity""", }; foreach (string test in testCases) { Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<T>(test, s_optionReadFromStrAllowFloatConstants)); } } [Fact] public static void UnsupportedFormats() { AssertUnsupportedFormatThrows<byte>(); AssertUnsupportedFormatThrows<sbyte>(); AssertUnsupportedFormatThrows<short>(); AssertUnsupportedFormatThrows<int>(); AssertUnsupportedFormatThrows<long>(); AssertUnsupportedFormatThrows<ushort>(); AssertUnsupportedFormatThrows<uint>(); AssertUnsupportedFormatThrows<ulong>(); AssertUnsupportedFormatThrows<float>(); AssertUnsupportedFormatThrows<decimal>(); AssertUnsupportedFormatThrows<byte?>(); AssertUnsupportedFormatThrows<sbyte?>(); AssertUnsupportedFormatThrows<short?>(); AssertUnsupportedFormatThrows<int?>(); AssertUnsupportedFormatThrows<long?>(); AssertUnsupportedFormatThrows<ushort?>(); AssertUnsupportedFormatThrows<uint?>(); AssertUnsupportedFormatThrows<ulong?>(); AssertUnsupportedFormatThrows<float?>(); AssertUnsupportedFormatThrows<decimal?>(); } private static void AssertUnsupportedFormatThrows<T>() { string[] testCases = new[] { "$123.46", // Currency "100.00 %", // Percent "1234,57", // Fixed point "00FF", // Hexadecimal }; foreach (string test in testCases) { Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<T>(test, s_optionReadFromStr)); } } [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/49936", TestPlatforms.Android)] public static void EscapingTest() { // Cause all characters to be escaped. var encoderSettings = new TextEncoderSettings(); encoderSettings.ForbidCharacters('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', '+', '-', 'e', 'E'); JavaScriptEncoder encoder = JavaScriptEncoder.Create(encoderSettings); var options = new JsonSerializerOptions(s_optionReadAndWriteFromStr) { Encoder = encoder }; PerformEscapingTest(JsonNumberTestData.Bytes, options); PerformEscapingTest(JsonNumberTestData.SBytes, options); PerformEscapingTest(JsonNumberTestData.Shorts, options); PerformEscapingTest(JsonNumberTestData.Ints, options); PerformEscapingTest(JsonNumberTestData.Longs, options); PerformEscapingTest(JsonNumberTestData.UShorts, options); PerformEscapingTest(JsonNumberTestData.UInts, options); PerformEscapingTest(JsonNumberTestData.ULongs, options); PerformEscapingTest(JsonNumberTestData.Floats, options); PerformEscapingTest(JsonNumberTestData.Doubles, options); PerformEscapingTest(JsonNumberTestData.Decimals, options); } private static void PerformEscapingTest<T>(List<T> numbers, JsonSerializerOptions options) { // All input characters are escaped IEnumerable<string> numbersAsStrings = numbers.Select(num => GetNumberAsString(num)); string input = JsonSerializer.Serialize(numbersAsStrings, options); AssertListNumbersEscaped(input); // Unescaping works List<T> deserialized = JsonSerializer.Deserialize<List<T>>(input, options); Assert.Equal(numbers.Count, deserialized.Count); for (int i = 0; i < numbers.Count; i++) { Assert.Equal(numbers[i], deserialized[i]); } // Every number is written as a string, and custom escaping is not honored. 
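            // (AssertListNumbersUnescaped below verifies that no backslash escape sequences remain
            // in the serialized string values.)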
string serialized = JsonSerializer.Serialize(deserialized, options); AssertListNumbersUnescaped(serialized); } private static void AssertListNumbersEscaped(string json) { var reader = new Utf8JsonReader(Encoding.UTF8.GetBytes(json)); reader.Read(); while (reader.Read()) { if (reader.TokenType == JsonTokenType.EndArray) { break; } else { Assert.Equal(JsonTokenType.String, reader.TokenType); Assert.True(reader.ValueSpan.IndexOf((byte)'\\') != -1); } } } private static void AssertListNumbersUnescaped(string json) { var reader = new Utf8JsonReader(Encoding.UTF8.GetBytes(json)); reader.Read(); while (reader.Read()) { if (reader.TokenType == JsonTokenType.EndArray) { break; } else { Assert.Equal(JsonTokenType.String, reader.TokenType); Assert.True(reader.ValueSpan.IndexOf((byte)'\\') == -1); } } } [Fact] public static void Number_RoundtripNull() { Perform_Number_RoundTripNull_Test<byte>(); Perform_Number_RoundTripNull_Test<sbyte>(); Perform_Number_RoundTripNull_Test<short>(); Perform_Number_RoundTripNull_Test<int>(); Perform_Number_RoundTripNull_Test<long>(); Perform_Number_RoundTripNull_Test<ushort>(); Perform_Number_RoundTripNull_Test<uint>(); Perform_Number_RoundTripNull_Test<ulong>(); Perform_Number_RoundTripNull_Test<float>(); Perform_Number_RoundTripNull_Test<decimal>(); } private static void Perform_Number_RoundTripNull_Test<T>() { string nullAsJson = "null"; string nullAsQuotedJson = $@"""{nullAsJson}"""; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<T>(nullAsJson, s_optionReadAndWriteFromStr)); Assert.Equal("0", JsonSerializer.Serialize(default(T))); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<T>(nullAsQuotedJson, s_optionReadAndWriteFromStr)); } [Fact] public static void NullableNumber_RoundtripNull() { Perform_NullableNumber_RoundTripNull_Test<byte?>(); Perform_NullableNumber_RoundTripNull_Test<sbyte?>(); Perform_NullableNumber_RoundTripNull_Test<short?>(); Perform_NullableNumber_RoundTripNull_Test<int?>(); Perform_NullableNumber_RoundTripNull_Test<long?>(); Perform_NullableNumber_RoundTripNull_Test<ushort?>(); Perform_NullableNumber_RoundTripNull_Test<uint?>(); Perform_NullableNumber_RoundTripNull_Test<ulong?>(); Perform_NullableNumber_RoundTripNull_Test<float?>(); Perform_NullableNumber_RoundTripNull_Test<decimal?>(); } private static void Perform_NullableNumber_RoundTripNull_Test<T>() { string nullAsJson = "null"; string nullAsQuotedJson = $@"""{nullAsJson}"""; Assert.Null(JsonSerializer.Deserialize<T>(nullAsJson, s_optionReadAndWriteFromStr)); Assert.Equal(nullAsJson, JsonSerializer.Serialize(default(T))); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<T>(nullAsQuotedJson, s_optionReadAndWriteFromStr)); } [Fact] public static void Disallow_ArbritaryStrings_On_AllowFloatingPointConstants() { string json = @"""12345"""; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<byte>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<sbyte>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<short>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<int>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<long>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ushort>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<uint>(json, 
s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ulong>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<float>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<double>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<decimal>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<byte?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<sbyte?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<short?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<int?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<long?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ushort?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<uint?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ulong?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<float?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<double?>(json, s_optionsAllowFloatConstants)); Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<decimal?>(json, s_optionsAllowFloatConstants)); } [Fact] public static void Attributes_OnMembers_Work() { // Bad JSON because Int should not be string. string intIsString = @"{""Float"":""1234.5"",""Int"":""12345""}"; // Good JSON because Float can be string. string floatIsString = @"{""Float"":""1234.5"",""Int"":12345}"; // Good JSON because Float can be number. string floatIsNumber = @"{""Float"":1234.5,""Int"":12345}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWith_Attribute_OnNumber>(intIsString)); ClassWith_Attribute_OnNumber obj = JsonSerializer.Deserialize<ClassWith_Attribute_OnNumber>(floatIsString); Assert.Equal(1234.5, obj.Float); Assert.Equal(12345, obj.Int); obj = JsonSerializer.Deserialize<ClassWith_Attribute_OnNumber>(floatIsNumber); Assert.Equal(1234.5, obj.Float); Assert.Equal(12345, obj.Int); // Per options, float should be written as string. 
JsonTestHelper.AssertJsonEqual(floatIsString, JsonSerializer.Serialize(obj)); } private class ClassWith_Attribute_OnNumber { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public float Float { get; set; } public int Int { get; set; } } [Fact] public static void Attribute_OnRootType_Works() { // Not allowed string floatIsString = @"{""Float"":""1234"",""Int"":123}"; // Allowed string floatIsNan = @"{""Float"":""NaN"",""Int"":123}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<Type_AllowFloatConstants>(floatIsString)); Type_AllowFloatConstants obj = JsonSerializer.Deserialize<Type_AllowFloatConstants>(floatIsNan); Assert.Equal(float.NaN, obj.Float); Assert.Equal(123, obj.Int); JsonTestHelper.AssertJsonEqual(floatIsNan, JsonSerializer.Serialize(obj)); } [JsonNumberHandling(JsonNumberHandling.AllowNamedFloatingPointLiterals)] private class Type_AllowFloatConstants { public float Float { get; set; } public int Int { get; set; } } [Fact] public static void AttributeOnType_WinsOver_GlobalOption() { // Global options strict, type options loose string json = @"{""Float"":""12345""}"; var obj1 = JsonSerializer.Deserialize<ClassWith_LooseAttribute>(json); Assert.Equal(@"{""Float"":""12345""}", JsonSerializer.Serialize(obj1)); // Global options loose, type options strict json = @"{""Float"":""12345""}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWith_StrictAttribute>(json, s_optionReadAndWriteFromStr)); var obj2 = new ClassWith_StrictAttribute() { Float = 12345 }; Assert.Equal(@"{""Float"":12345}", JsonSerializer.Serialize(obj2, s_optionReadAndWriteFromStr)); } [JsonNumberHandling(JsonNumberHandling.Strict)] public class ClassWith_StrictAttribute { public float Float { get; set; } } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] private class ClassWith_LooseAttribute { public float Float { get; set; } } [Fact] public static void AttributeOnMember_WinsOver_AttributeOnType() { string json = @"{""Double"":""NaN""}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWith_Attribute_On_TypeAndMember>(json)); var obj = new ClassWith_Attribute_On_TypeAndMember { Double = float.NaN }; Assert.Throws<ArgumentException>(() => JsonSerializer.Serialize(obj)); } [JsonNumberHandling(JsonNumberHandling.AllowNamedFloatingPointLiterals)] private class ClassWith_Attribute_On_TypeAndMember { [JsonNumberHandling(JsonNumberHandling.Strict)] public double Double { get; set; } } [Fact] public static void Attribute_OnNestedType_Works() { string jsonWithShortProperty = @"{""Short"":""1""}"; ClassWith_ReadAsStringAttribute obj = JsonSerializer.Deserialize<ClassWith_ReadAsStringAttribute>(jsonWithShortProperty); Assert.Equal(1, obj.Short); string jsonWithMyObjectProperty = @"{""MyObject"":{""Float"":""1""}}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWith_ReadAsStringAttribute>(jsonWithMyObjectProperty)); } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString)] public class ClassWith_ReadAsStringAttribute { public short Short { get; set; } public ClassWith_StrictAttribute MyObject { get; set; } } [Fact] public static void MemberAttributeAppliesToCollection_SimpleElements() { RunTest<int[]>(); RunTest<ConcurrentQueue<int>>(); RunTest<GenericICollectionWrapper<int>>(); RunTest<IEnumerable<int>>(); RunTest<Collection<int>>(); RunTest<ImmutableList<int>>(); RunTest<HashSet<int>>(); RunTest<List<int>>(); RunTest<IList<int>>(); 
RunTest<IList>(); RunTest<Queue<int>>(); static void RunTest<T>() { string json = @"{""MyList"":[""1"",""2""]}"; ClassWithSimpleCollectionProperty<T> obj = global::System.Text.Json.JsonSerializer.Deserialize<ClassWithSimpleCollectionProperty<T>>(json); Assert.Equal(json, global::System.Text.Json.JsonSerializer.Serialize(obj)); } } public class ClassWithSimpleCollectionProperty<T> { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public T MyList { get; set; } } [Fact] public static void NestedCollectionElementTypeHandling_Overrides_GlobalOption() { // Strict policy on the collection element type overrides read-as-string on the collection property string json = @"{""MyList"":[{""Float"":""1""}]}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWithComplexListProperty>(json, s_optionReadAndWriteFromStr)); // Strict policy on the collection element type overrides write-as-string on the collection property var obj = new ClassWithComplexListProperty { MyList = new List<ClassWith_StrictAttribute> { new ClassWith_StrictAttribute { Float = 1 } } }; Assert.Equal(@"{""MyList"":[{""Float"":1}]}", JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr)); } public class ClassWithComplexListProperty { public List<ClassWith_StrictAttribute> MyList { get; set; } } [Fact] public static void NumberHandlingAttribute_NotAllowedOn_CollectionOfNonNumbers() { Assert.Throws<InvalidOperationException>(() => JsonSerializer.Deserialize<ClassWith_AttributeOnComplexListProperty>("")); Assert.Throws<InvalidOperationException>(() => JsonSerializer.Serialize(new ClassWith_AttributeOnComplexListProperty())); Assert.Throws<InvalidOperationException>(() => JsonSerializer.Deserialize<ClassWith_AttributeOnComplexDictionaryProperty>("")); Assert.Throws<InvalidOperationException>(() => JsonSerializer.Serialize(new ClassWith_AttributeOnComplexDictionaryProperty())); } public class ClassWith_AttributeOnComplexListProperty { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public List<ClassWith_StrictAttribute> MyList { get; set; } } public class ClassWith_AttributeOnComplexDictionaryProperty { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString)] public Dictionary<string, ClassWith_StrictAttribute> MyDictionary { get; set; } } [Fact] public static void MemberAttributeAppliesToDictionary_SimpleElements() { string json = @"{""First"":""1"",""Second"":""2""}"; ClassWithSimpleDictionaryProperty obj = JsonSerializer.Deserialize<ClassWithSimpleDictionaryProperty>(json); } public class ClassWithSimpleDictionaryProperty { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public Dictionary<string, int> MyDictionary { get; set; } } [Fact] public static void NestedDictionaryElementTypeHandling_Overrides_GlobalOption() { // Strict policy on the dictionary element type overrides read-as-string on the collection property. 
string json = @"{""MyDictionary"":{""Key"":{""Float"":""1""}}}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<ClassWithComplexDictionaryProperty>(json, s_optionReadFromStr)); // Strict policy on the collection element type overrides write-as-string on the collection property var obj = new ClassWithComplexDictionaryProperty { MyDictionary = new Dictionary<string, ClassWith_StrictAttribute> { ["Key"] = new ClassWith_StrictAttribute { Float = 1 } } }; Assert.Equal(@"{""MyDictionary"":{""Key"":{""Float"":1}}}", JsonSerializer.Serialize(obj, s_optionReadFromStr)); } public class ClassWithComplexDictionaryProperty { public Dictionary<string, ClassWith_StrictAttribute> MyDictionary { get; set; } } [Fact] public static void TypeAttributeAppliesTo_CustomCollectionElements() { string json = @"[""1""]"; MyCustomList obj = JsonSerializer.Deserialize<MyCustomList>(json); Assert.Equal(json, JsonSerializer.Serialize(obj)); } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public class MyCustomList : List<int> { } [Fact] public static void TypeAttributeAppliesTo_CustomCollectionElements_HonoredWhenProperty() { string json = @"{""List"":[""1""]}"; ClassWithCustomList obj = JsonSerializer.Deserialize<ClassWithCustomList>(json); Assert.Equal(json, JsonSerializer.Serialize(obj)); } public class ClassWithCustomList { public MyCustomList List { get; set; } } [Fact] public static void TypeAttributeAppliesTo_CustomDictionaryElements() { string json = @"{""Key"":""1""}"; MyCustomDictionary obj = JsonSerializer.Deserialize<MyCustomDictionary>(json); Assert.Equal(json, JsonSerializer.Serialize(obj)); } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public class MyCustomDictionary : Dictionary<string, int> { } [Fact] public static void TypeAttributeAppliesTo_CustomDictionaryElements_HonoredWhenProperty() { string json = @"{""Dictionary"":{""Key"":""1""}}"; ClassWithCustomDictionary obj = JsonSerializer.Deserialize<ClassWithCustomDictionary>(json); Assert.Equal(json, JsonSerializer.Serialize(obj)); } public class ClassWithCustomDictionary { public MyCustomDictionary Dictionary { get; set; } } [Fact] public static void Attribute_OnType_NotRecursive() { // Recursive behavior, where number handling setting on a property is applied to subsequent // properties in its type closure, would allow a string number. This is not supported. 
string json = @"{""NestedClass"":{""MyInt"":""1""}}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<AttributeAppliedToFirstLevelProp>(json)); var obj = new AttributeAppliedToFirstLevelProp { NestedClass = new NonNumberType { MyInt = 1 } }; Assert.Equal(@"{""NestedClass"":{""MyInt"":1}}", JsonSerializer.Serialize(obj)); } [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public class AttributeAppliedToFirstLevelProp { public NonNumberType NestedClass { get; set; } } public class NonNumberType { public int MyInt { get; set; } } [Fact] public static void HandlingOnMemberOverridesHandlingOnType_Enumerable() { string json = @"{""List"":[""1""]}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<MyCustomListWrapper>(json)); var obj = new MyCustomListWrapper { List = new MyCustomList { 1 } }; Assert.Equal(@"{""List"":[1]}", JsonSerializer.Serialize(obj)); } public class MyCustomListWrapper { [JsonNumberHandling(JsonNumberHandling.Strict)] public MyCustomList List { get; set; } } [Fact] public static void HandlingOnMemberOverridesHandlingOnType_Dictionary() { string json = @"{""Dictionary"":{""Key"":""1""}}"; Assert.Throws<JsonException>(() => JsonSerializer.Deserialize<MyCustomDictionaryWrapper>(json)); var obj1 = new MyCustomDictionaryWrapper { Dictionary = new MyCustomDictionary { ["Key"] = 1 } }; Assert.Equal(@"{""Dictionary"":{""Key"":1}}", JsonSerializer.Serialize(obj1)); } public class MyCustomDictionaryWrapper { [JsonNumberHandling(JsonNumberHandling.Strict)] public MyCustomDictionary Dictionary { get; set; } } [Fact] public static void Attribute_Allowed_On_NonNumber_NonCollection_Property() { const string Json = @"{""MyProp"":{""MyInt"":1}}"; ClassWith_NumberHandlingOn_ObjectProperty obj = JsonSerializer.Deserialize<ClassWith_NumberHandlingOn_ObjectProperty>(Json); Assert.Equal(1, obj.MyProp.MyInt); string json = JsonSerializer.Serialize(obj); Assert.Equal(Json, json); } public class ClassWith_NumberHandlingOn_ObjectProperty { [JsonNumberHandling(JsonNumberHandling.Strict)] public NonNumberType MyProp { get; set; } } [Fact] public static void Attribute_Allowed_On_Property_WithCustomConverter() { string json = @"{""Prop"":1}"; // Converter returns 25 regardless of input. var obj = JsonSerializer.Deserialize<ClassWith_NumberHandlingOn_Property_WithCustomConverter>(json); Assert.Equal(25, obj.Prop); // Converter throws this exception regardless of input. NotImplementedException ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(obj)); Assert.Equal("Converter was called", ex.Message); } public class ClassWith_NumberHandlingOn_Property_WithCustomConverter { [JsonNumberHandling(JsonNumberHandling.Strict)] [JsonConverter(typeof(ConverterForInt32))] public int Prop { get; set; } } [Fact] public static void Attribute_Allowed_On_Type_WithCustomConverter() { string json = @"{}"; NotImplementedException ex; // Assert regular Read/Write methods on custom converter are called. 
ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Deserialize<ClassWith_NumberHandlingOn_Type_WithCustomConverter>(json)); Assert.Equal("Converter was called", ex.Message); ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(new ClassWith_NumberHandlingOn_Type_WithCustomConverter())); Assert.Equal("Converter was called", ex.Message); } [JsonNumberHandling(JsonNumberHandling.Strict)] [JsonConverter(typeof(ConverterForMyType))] public class ClassWith_NumberHandlingOn_Type_WithCustomConverter { } private class ConverterForMyType : JsonConverter<ClassWith_NumberHandlingOn_Type_WithCustomConverter> { public override ClassWith_NumberHandlingOn_Type_WithCustomConverter Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { throw new NotImplementedException("Converter was called"); } public override void Write(Utf8JsonWriter writer, ClassWith_NumberHandlingOn_Type_WithCustomConverter value, JsonSerializerOptions options) { throw new NotImplementedException("Converter was called"); } } [Fact] public static void CustomConverterOverridesBuiltInLogic() { var options = new JsonSerializerOptions(s_optionReadAndWriteFromStr) { Converters = { new ConverterForInt32(), new ConverterForFloat() } }; string json = @"""32"""; // Converter returns 25 regardless of input. Assert.Equal(25, JsonSerializer.Deserialize<int>(json, options)); // Converter throws this exception regardless of input. NotImplementedException ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(4, options)); Assert.Equal("Converter was called", ex.Message); json = @"""NaN"""; // Converter returns 25 if NaN. Assert.Equal(25, JsonSerializer.Deserialize<float?>(json, options)); // Converter writes 25 if NaN. Assert.Equal("25", JsonSerializer.Serialize((float?)float.NaN, options)); } public class ConverterForFloat : JsonConverter<float?> { public override float? Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { if (reader.TokenType == JsonTokenType.String && reader.GetString() == "NaN") { return 25; } throw new NotSupportedException(); } public override void Write(Utf8JsonWriter writer, float? value, JsonSerializerOptions options) { if (float.IsNaN(value.Value)) { writer.WriteNumberValue(25); return; } throw new NotSupportedException(); } } [Fact] public static void JsonNumberHandling_ArgOutOfRangeFail() { // Global options ArgumentOutOfRangeException ex = Assert.Throws<ArgumentOutOfRangeException>( () => new JsonSerializerOptions { NumberHandling = (JsonNumberHandling)(-1) }); Assert.Contains("value", ex.ToString()); Assert.Throws<ArgumentOutOfRangeException>( () => new JsonSerializerOptions { NumberHandling = (JsonNumberHandling)(8) }); ex = Assert.Throws<ArgumentOutOfRangeException>( () => new JsonNumberHandlingAttribute((JsonNumberHandling)(-1))); Assert.Contains("handling", ex.ToString()); Assert.Throws<ArgumentOutOfRangeException>( () => new JsonNumberHandlingAttribute((JsonNumberHandling)(8))); } [Fact] public static void InternalCollectionConverter_CustomNumberConverter_GlobalOption() { NotImplementedException ex; var list = new List<int> { 1 }; var options = new JsonSerializerOptions(s_optionReadAndWriteFromStr) { Converters = { new ConverterForInt32() } }; // Assert converter methods are called and not Read/WriteWithNumberHandling (which would throw InvalidOperationException). // Converter returns 25 regardless of input. 
Assert.Equal(25, JsonSerializer.Deserialize<List<int>>(@"[""1""]", options)[0]); // Converter throws this exception regardless of input. ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(list, options)); Assert.Equal("Converter was called", ex.Message); var list2 = new List<int?> { 1 }; Assert.Equal(25, JsonSerializer.Deserialize<List<int?>>(@"[""1""]", options)[0]); ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(list2, options)); Assert.Equal("Converter was called", ex.Message); // Okay to set number handling for number collection property when number is handled with custom converter; // converter Read/Write methods called. ClassWithListPropAndAttribute obj1 = JsonSerializer.Deserialize<ClassWithListPropAndAttribute>(@"{""Prop"":[""1""]}", options); Assert.Equal(25, obj1.Prop[0]); ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(obj1, options)); Assert.Equal("Converter was called", ex.Message); ClassWithDictPropAndAttribute obj2 = JsonSerializer.Deserialize<ClassWithDictPropAndAttribute>(@"{""Prop"":{""1"":""1""}}", options); Assert.Equal(25, obj2.Prop[1]); ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(obj2, options)); Assert.Equal("Converter was called", ex.Message); } private class ClassWithListPropAndAttribute { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public List<int> Prop { get; set; } } private class ClassWithDictPropAndAttribute { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] public Dictionary<int, int?> Prop { get; set; } } [Fact] public static void InternalCollectionConverter_CustomNumberConverter_OnProperty() { // Invalid to set number handling for number collection property when number is handled with custom converter. 
var ex = Assert.Throws<InvalidOperationException>(() => JsonSerializer.Deserialize<ClassWithListPropAndAttribute_ConverterOnProp>("")); Assert.Contains(nameof(ClassWithListPropAndAttribute_ConverterOnProp), ex.ToString()); Assert.Contains("IntProp", ex.ToString()); ex = Assert.Throws<InvalidOperationException>(() => JsonSerializer.Serialize(new ClassWithListPropAndAttribute_ConverterOnProp())); Assert.Contains(nameof(ClassWithListPropAndAttribute_ConverterOnProp), ex.ToString()); Assert.Contains("IntProp", ex.ToString()); ex = Assert.Throws<InvalidOperationException>(() => JsonSerializer.Deserialize<ClassWithDictPropAndAttribute_ConverterOnProp>("")); Assert.Contains(nameof(ClassWithDictPropAndAttribute_ConverterOnProp), ex.ToString()); Assert.Contains("IntProp", ex.ToString()); ex = Assert.Throws<InvalidOperationException>(() => JsonSerializer.Serialize(new ClassWithDictPropAndAttribute_ConverterOnProp())); Assert.Contains(nameof(ClassWithDictPropAndAttribute_ConverterOnProp), ex.ToString()); Assert.Contains("IntProp", ex.ToString()); } private class ClassWithListPropAndAttribute_ConverterOnProp { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] [JsonConverter(typeof(ListOfIntConverter))] public List<int> IntProp { get; set; } } private class ClassWithDictPropAndAttribute_ConverterOnProp { [JsonNumberHandling(JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString)] [JsonConverter(typeof(ClassWithDictPropAndAttribute_ConverterOnProp))] public Dictionary<int, int?> IntProp { get; set; } } public class ListOfIntConverter : JsonConverter<List<int>> { public override List<int> Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) => throw new NotImplementedException(); public override void Write(Utf8JsonWriter writer, List<int> value, JsonSerializerOptions options) => throw new NotImplementedException(); } [Fact] public static void InternalCollectionConverter_CustomNullableNumberConverter() { NotImplementedException ex; var dict = new Dictionary<int, int?> { [1] = 1 }; var options = new JsonSerializerOptions(s_optionReadAndWriteFromStr) { Converters = { new ConverterForNullableInt32() } }; // Assert converter methods are called and not Read/WriteWithNumberHandling (which would throw InvalidOperationException). // Converter returns 25 regardless of input. Assert.Equal(25, JsonSerializer.Deserialize<Dictionary<int, int?>>(@"{""1"":""1""}", options)[1]); ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(dict, options)); Assert.Equal("Converter was called", ex.Message); var obj = JsonSerializer.Deserialize<ClassWithDictPropAndAttribute>(@"{""Prop"":{""1"":""1""}}", options); Assert.Equal(25, obj.Prop[1]); ex = Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(obj, options)); Assert.Throws<NotImplementedException>(() => JsonSerializer.Serialize(dict, options)); Assert.Equal("Converter was called", ex.Message); } public class ConverterForNullableInt32 : JsonConverter<int?> { public override int? Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { return 25; } public override void Write(Utf8JsonWriter writer, int? value, JsonSerializerOptions options) { throw new NotImplementedException("Converter was called"); } } /// <summary> /// Example of a custom converter that uses the options to determine behavior. 
/// </summary> [Fact] public static void AdaptableCustomConverter() { // Baseline without custom converter PlainClassWithList obj = new() { Prop = new List<int>() { 1 } }; string json = JsonSerializer.Serialize(obj, s_optionReadAndWriteFromStr); Assert.Equal("{\"Prop\":[\"1\"]}", json); obj = JsonSerializer.Deserialize<PlainClassWithList>(json, s_optionReadAndWriteFromStr); Assert.Equal(1, obj.Prop[0]); // First with numbers JsonSerializerOptions options = new() { Converters = { new AdaptableInt32Converter() } }; obj = new() { Prop = new List<int>() { 1 } }; json = JsonSerializer.Serialize(obj, options); Assert.Equal("{\"Prop\":[101]}", json); obj = JsonSerializer.Deserialize<PlainClassWithList>(json, options); Assert.Equal(1, obj.Prop[0]); // Then with strings options = new() { NumberHandling = JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString, Converters = { new AdaptableInt32Converter() } }; obj = new() { Prop = new List<int>() { 1 } }; json = JsonSerializer.Serialize(obj, options); Assert.Equal("{\"Prop\":[\"101\"]}", json); obj = JsonSerializer.Deserialize<PlainClassWithList>(json, options); Assert.Equal(1, obj.Prop[0]); } private class PlainClassWithList { public List<int> Prop { get; set; } } public class AdaptableInt32Converter : JsonConverter<int> { public override int Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { if ((JsonNumberHandling.AllowReadingFromString & options.NumberHandling) != 0) { // Assume it's a string; don't use TryParse(). return int.Parse(reader.GetString(), CultureInfo.InvariantCulture) - 100; } else { return reader.GetInt32() - 100; } } public override void Write(Utf8JsonWriter writer, int value, JsonSerializerOptions options) { if ((JsonNumberHandling.WriteAsString & options.NumberHandling) != 0) { writer.WriteStringValue((value + 100).ToString(CultureInfo.InvariantCulture)); } else { writer.WriteNumberValue(value + 100); } } } } public class NumberHandlingTests_AsyncStreamOverload : NumberHandlingTests_OverloadSpecific { public NumberHandlingTests_AsyncStreamOverload() : base(JsonSerializerWrapperForString.AsyncStreamSerializer) { } } public class NumberHandlingTests_SyncStreamOverload : NumberHandlingTests_OverloadSpecific { public NumberHandlingTests_SyncStreamOverload() : base(JsonSerializerWrapperForString.SyncStreamSerializer) { } } public class NumberHandlingTests_SyncOverload : NumberHandlingTests_OverloadSpecific { public NumberHandlingTests_SyncOverload() : base(JsonSerializerWrapperForString.StringSerializer) { } } public class NumberHandlingTests_Document : NumberHandlingTests_OverloadSpecific { public NumberHandlingTests_Document() : base(JsonSerializerWrapperForString.DocumentSerializer) { } } public class NumberHandlingTests_Element : NumberHandlingTests_OverloadSpecific { public NumberHandlingTests_Element() : base(JsonSerializerWrapperForString.ElementSerializer) { } } public class NumberHandlingTests_Node : NumberHandlingTests_OverloadSpecific { public NumberHandlingTests_Node() : base(JsonSerializerWrapperForString.NodeSerializer) { } } public abstract class NumberHandlingTests_OverloadSpecific { private JsonSerializerWrapperForString Deserializer { get; } public NumberHandlingTests_OverloadSpecific(JsonSerializerWrapperForString deserializer) { Deserializer = deserializer; } [Theory] [MemberData(nameof(NumberHandling_ForPropsReadAfter_DeserializingCtorParams_TestData))] public async Task NumberHandling_ForPropsReadAfter_DeserializingCtorParams(string json) { 
JsonSerializerOptions options = new JsonSerializerOptions { NumberHandling = JsonNumberHandling.AllowReadingFromString | JsonNumberHandling.WriteAsString, PropertyNamingPolicy = JsonNamingPolicy.CamelCase, }; Result result = await Deserializer.DeserializeWrapper<Result>(json, options); JsonTestHelper.AssertJsonEqual(json, JsonSerializer.Serialize(result, options)); } public static IEnumerable<object[]> NumberHandling_ForPropsReadAfter_DeserializingCtorParams_TestData() { yield return new object[] { @"{ ""album"": { ""userPlayCount"": ""123"", ""name"": ""the name of the album"", ""artist"": ""the name of the artist"", ""wiki"": { ""summary"": ""a summary of the album"" } } }" }; yield return new object[] { @"{ ""album"": { ""name"": ""the name of the album"", ""userPlayCount"": ""123"", ""artist"": ""the name of the artist"", ""wiki"": { ""summary"": ""a summary of the album"" } } }" }; yield return new object[] { @"{ ""album"": { ""name"": ""the name of the album"", ""artist"": ""the name of the artist"", ""userPlayCount"": ""123"", ""wiki"": { ""summary"": ""a summary of the album"" } } }" }; yield return new object[] { @"{ ""album"": { ""name"": ""the name of the album"", ""artist"": ""the name of the artist"", ""wiki"": { ""summary"": ""a summary of the album"" }, ""userPlayCount"": ""123"" } }" }; } public class Result { public Album Album { get; init; } } public class Album { public Album(string name, string artist) { Name = name; Artist = artist; } public string Name { get; init; } public string Artist { get; init; } public long? userPlayCount { get; init; } [JsonExtensionData] public Dictionary<string, JsonElement> ExtensionData { get; set; } } } }
-1
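The record above embeds System.Text.Json's NumberHandlingTests, which exercise the JsonNumberHandling option (reading numbers from JSON strings, writing numbers as JSON strings, and the named floating-point literals). As a minimal sketch of what those two flags do when combined — the Payload record and its Count/Ratio properties are invented for illustration and are not part of the repository's test suite:

using System;
using System.Text.Json;
using System.Text.Json.Serialization;

class NumberHandlingSketch
{
    // Hypothetical type used only for this illustration.
    record Payload(int Count, float Ratio);

    static void Main()
    {
        var options = new JsonSerializerOptions
        {
            NumberHandling = JsonNumberHandling.AllowReadingFromString |
                             JsonNumberHandling.WriteAsString
        };

        // AllowReadingFromString lets quoted numbers deserialize.
        Payload p = JsonSerializer.Deserialize<Payload>(
            @"{""Count"":""42"",""Ratio"":""0.5""}", options)!;

        // WriteAsString emits the numbers back out as JSON strings.
        Console.WriteLine(JsonSerializer.Serialize(p, options));
        // Expected output: {"Count":"42","Ratio":"0.5"}
    }
}

With AllowReadingFromString the quoted values deserialize normally, and with WriteAsString the round-tripped output keeps the numbers quoted, which mirrors the assertions made throughout the tests in the record above.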
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/tests/JIT/HardwareIntrinsics/X86/Sse2/ShiftLeftLogical128BitLane.Byte.1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void ShiftLeftLogical128BitLaneByte1() { var test = new ImmUnaryOpTest__ShiftLeftLogical128BitLaneByte1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Sse2.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Sse2.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Sse2.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftLeftLogical128BitLaneByte1 { private struct TestStruct { public Vector128<Byte> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (byte)8; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref testStruct._fld), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftLeftLogical128BitLaneByte1 testClass) { var result = Sse2.ShiftLeftLogical128BitLane(_fld, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static Byte[] _data = new Byte[Op1ElementCount]; private static Vector128<Byte> _clsVar; private Vector128<Byte> 
_fld; private SimpleUnaryOpTest__DataTable<Byte, Byte> _dataTable; static ImmUnaryOpTest__ShiftLeftLogical128BitLaneByte1() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (byte)8; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _clsVar), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); } public ImmUnaryOpTest__ShiftLeftLogical128BitLaneByte1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (byte)8; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _fld), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (byte)8; } _dataTable = new SimpleUnaryOpTest__DataTable<Byte, Byte>(_data, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => Sse2.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Sse2.ShiftLeftLogical128BitLane( Unsafe.Read<Vector128<Byte>>(_dataTable.inArrayPtr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Sse2.ShiftLeftLogical128BitLane( Sse2.LoadVector128((Byte*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Sse2.ShiftLeftLogical128BitLane( Sse2.LoadAlignedVector128((Byte*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Sse2).GetMethod(nameof(Sse2.ShiftLeftLogical128BitLane), new Type[] { typeof(Vector128<Byte>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Byte>>(_dataTable.inArrayPtr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Sse2).GetMethod(nameof(Sse2.ShiftLeftLogical128BitLane), new Type[] { typeof(Vector128<Byte>), typeof(byte) }) .Invoke(null, new object[] { Sse2.LoadVector128((Byte*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Sse2).GetMethod(nameof(Sse2.ShiftLeftLogical128BitLane), new Type[] { typeof(Vector128<Byte>), typeof(byte) }) .Invoke(null, new object[] { Sse2.LoadAlignedVector128((Byte*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Sse2.ShiftLeftLogical128BitLane( _clsVar, 1 ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector128<Byte>>(_dataTable.inArrayPtr); var result = Sse2.ShiftLeftLogical128BitLane(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = Sse2.LoadVector128((Byte*)(_dataTable.inArrayPtr)); var result = Sse2.ShiftLeftLogical128BitLane(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var firstOp = Sse2.LoadAlignedVector128((Byte*)(_dataTable.inArrayPtr)); var result = Sse2.ShiftLeftLogical128BitLane(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftLeftLogical128BitLaneByte1(); var result = Sse2.ShiftLeftLogical128BitLane(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Sse2.ShiftLeftLogical128BitLane(_fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Sse2.ShiftLeftLogical128BitLane(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Byte> firstOp, void* result, [CallerMemberName] string method = "") { Byte[] inArray = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { Byte[] inArray = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector128<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(Byte[] firstOp, Byte[] result, [CallerMemberName] string method = "") 
{ bool succeeded = true; if (result[0] != 0) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != 8) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Sse2)}.{nameof(Sse2.ShiftLeftLogical128BitLane)}<Byte>(Vector128<Byte><9>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void ShiftLeftLogical128BitLaneByte1() { var test = new ImmUnaryOpTest__ShiftLeftLogical128BitLaneByte1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Sse2.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Sse2.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Sse2.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftLeftLogical128BitLaneByte1 { private struct TestStruct { public Vector128<Byte> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (byte)8; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref testStruct._fld), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftLeftLogical128BitLaneByte1 testClass) { var result = Sse2.ShiftLeftLogical128BitLane(_fld, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static Byte[] _data = new Byte[Op1ElementCount]; private static Vector128<Byte> _clsVar; private Vector128<Byte> 
_fld; private SimpleUnaryOpTest__DataTable<Byte, Byte> _dataTable; static ImmUnaryOpTest__ShiftLeftLogical128BitLaneByte1() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (byte)8; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _clsVar), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); } public ImmUnaryOpTest__ShiftLeftLogical128BitLaneByte1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (byte)8; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _fld), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (byte)8; } _dataTable = new SimpleUnaryOpTest__DataTable<Byte, Byte>(_data, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => Sse2.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Sse2.ShiftLeftLogical128BitLane( Unsafe.Read<Vector128<Byte>>(_dataTable.inArrayPtr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Sse2.ShiftLeftLogical128BitLane( Sse2.LoadVector128((Byte*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Sse2.ShiftLeftLogical128BitLane( Sse2.LoadAlignedVector128((Byte*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Sse2).GetMethod(nameof(Sse2.ShiftLeftLogical128BitLane), new Type[] { typeof(Vector128<Byte>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Byte>>(_dataTable.inArrayPtr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Sse2).GetMethod(nameof(Sse2.ShiftLeftLogical128BitLane), new Type[] { typeof(Vector128<Byte>), typeof(byte) }) .Invoke(null, new object[] { Sse2.LoadVector128((Byte*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Sse2).GetMethod(nameof(Sse2.ShiftLeftLogical128BitLane), new Type[] { typeof(Vector128<Byte>), typeof(byte) }) .Invoke(null, new object[] { Sse2.LoadAlignedVector128((Byte*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Sse2.ShiftLeftLogical128BitLane( _clsVar, 1 ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector128<Byte>>(_dataTable.inArrayPtr); var result = Sse2.ShiftLeftLogical128BitLane(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = Sse2.LoadVector128((Byte*)(_dataTable.inArrayPtr)); var result = Sse2.ShiftLeftLogical128BitLane(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var firstOp = Sse2.LoadAlignedVector128((Byte*)(_dataTable.inArrayPtr)); var result = Sse2.ShiftLeftLogical128BitLane(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftLeftLogical128BitLaneByte1(); var result = Sse2.ShiftLeftLogical128BitLane(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Sse2.ShiftLeftLogical128BitLane(_fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Sse2.ShiftLeftLogical128BitLane(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Byte> firstOp, void* result, [CallerMemberName] string method = "") { Byte[] inArray = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { Byte[] inArray = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector128<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(Byte[] firstOp, Byte[] result, [CallerMemberName] string method = "") 
{ bool succeeded = true; if (result[0] != 0) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != 8) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Sse2)}.{nameof(Sse2.ShiftLeftLogical128BitLane)}<Byte>(Vector128<Byte><9>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
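The test code above exercises Sse2.ShiftLeftLogical128BitLane, which shifts an entire 128-bit vector left by a whole number of bytes rather than bits. Below is a minimal sketch of that operation outside the test harness; the class name and sample values are invented for this note, and the snippet assumes it runs on x86/x64 hardware with SSE2 available.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ShiftLeftLogical128BitLaneSketch
{
    static void Main()
    {
        if (!Sse2.IsSupported)
        {
            Console.WriteLine("SSE2 is not supported on this machine.");
            return;
        }

        // Bytes 0..15, element 0 being the least significant byte of the vector.
        Vector128<byte> value = Vector128.Create(
            (byte)0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);

        // Shift the whole 128-bit lane left by one byte (not one bit):
        // element 0 becomes 0 and every other element takes its lower neighbour's value.
        Vector128<byte> shifted = Sse2.ShiftLeftLogical128BitLane(value, 1);

        Console.WriteLine(value);    // <0, 1, 2, ..., 15>
        Console.WriteLine(shifted);  // <0, 0, 1, ..., 14>
    }
}

Shifting by one byte zeroes element 0 and moves every other element up one position, which is why the validation in the test above expects result[0] to be 0.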
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
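The description above hinges on what it means for part of a pattern to be atomic, i.e. to never give characters back to later parts of the pattern. A generic illustration of that behaviour using an explicit atomic group is sketched below; the patterns and input are invented for this note and are not taken from the PR or its test corpus.

using System;
using System.Text.RegularExpressions;

class AtomicGroupSketch
{
    static void Main()
    {
        const string input = "aaaa";

        // The greedy loop "a+" first consumes all four 'a's, then backtracks one
        // character so the trailing 'a' in the pattern can still match.
        Console.WriteLine(Regex.IsMatch(input, "^a+a$"));     // True

        // Inside an atomic group "(?>...)" the loop never gives characters back,
        // so nothing is left for the trailing 'a' and the match fails.
        Console.WriteLine(Regex.IsMatch(input, "^(?>a+)a$")); // False

        // When nothing after a loop could use what the loop would give back,
        // the engine can treat the loop as if it were atomic and skip emitting
        // backtracking code for it -- the kind of annotation the description refers to.
        Console.WriteLine(Regex.IsMatch(input, "^a+$"));      // True
    }
}

The failure described above arises when that atomicity annotation is computed differently by the tree optimizer and by the code generator that consumes it.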
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/Microsoft.Extensions.Logging.Configuration/src/LoggerFilterConfigureOptions.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Diagnostics.CodeAnalysis; using Microsoft.Extensions.Configuration; using Microsoft.Extensions.Options; namespace Microsoft.Extensions.Logging { internal sealed class LoggerFilterConfigureOptions : IConfigureOptions<LoggerFilterOptions> { private const string LogLevelKey = "LogLevel"; private const string DefaultCategory = "Default"; private readonly IConfiguration _configuration; public LoggerFilterConfigureOptions(IConfiguration configuration) { _configuration = configuration; } public void Configure(LoggerFilterOptions options) { LoadDefaultConfigValues(options); } private void LoadDefaultConfigValues(LoggerFilterOptions options) { if (_configuration == null) { return; } options.CaptureScopes = GetCaptureScopesValue(options); foreach (IConfigurationSection configurationSection in _configuration.GetChildren()) { if (configurationSection.Key.Equals(LogLevelKey, StringComparison.OrdinalIgnoreCase)) { // Load global category defaults LoadRules(options, configurationSection, null); } else { IConfigurationSection logLevelSection = configurationSection.GetSection(LogLevelKey); if (logLevelSection != null) { // Load logger specific rules string logger = configurationSection.Key; LoadRules(options, logLevelSection, logger); } } } [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:RequiresUnreferencedCode", Justification = "IConfiguration.GetValue is safe when T is a bool.")] bool GetCaptureScopesValue(LoggerFilterOptions options) => _configuration.GetValue(nameof(options.CaptureScopes), options.CaptureScopes); } private void LoadRules(LoggerFilterOptions options, IConfigurationSection configurationSection, string logger) { foreach (System.Collections.Generic.KeyValuePair<string, string> section in configurationSection.AsEnumerable(true)) { if (TryGetSwitch(section.Value, out LogLevel level)) { string category = section.Key; if (category.Equals(DefaultCategory, StringComparison.OrdinalIgnoreCase)) { category = null; } var newRule = new LoggerFilterRule(logger, category, level, null); options.Rules.Add(newRule); } } } private static bool TryGetSwitch(string value, out LogLevel level) { if (string.IsNullOrEmpty(value)) { level = LogLevel.None; return false; } else if (Enum.TryParse(value, true, out level)) { return true; } else { throw new InvalidOperationException(SR.Format(SR.ValueNotSupported, value)); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Diagnostics.CodeAnalysis; using Microsoft.Extensions.Configuration; using Microsoft.Extensions.Options; namespace Microsoft.Extensions.Logging { internal sealed class LoggerFilterConfigureOptions : IConfigureOptions<LoggerFilterOptions> { private const string LogLevelKey = "LogLevel"; private const string DefaultCategory = "Default"; private readonly IConfiguration _configuration; public LoggerFilterConfigureOptions(IConfiguration configuration) { _configuration = configuration; } public void Configure(LoggerFilterOptions options) { LoadDefaultConfigValues(options); } private void LoadDefaultConfigValues(LoggerFilterOptions options) { if (_configuration == null) { return; } options.CaptureScopes = GetCaptureScopesValue(options); foreach (IConfigurationSection configurationSection in _configuration.GetChildren()) { if (configurationSection.Key.Equals(LogLevelKey, StringComparison.OrdinalIgnoreCase)) { // Load global category defaults LoadRules(options, configurationSection, null); } else { IConfigurationSection logLevelSection = configurationSection.GetSection(LogLevelKey); if (logLevelSection != null) { // Load logger specific rules string logger = configurationSection.Key; LoadRules(options, logLevelSection, logger); } } } [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:RequiresUnreferencedCode", Justification = "IConfiguration.GetValue is safe when T is a bool.")] bool GetCaptureScopesValue(LoggerFilterOptions options) => _configuration.GetValue(nameof(options.CaptureScopes), options.CaptureScopes); } private void LoadRules(LoggerFilterOptions options, IConfigurationSection configurationSection, string logger) { foreach (System.Collections.Generic.KeyValuePair<string, string> section in configurationSection.AsEnumerable(true)) { if (TryGetSwitch(section.Value, out LogLevel level)) { string category = section.Key; if (category.Equals(DefaultCategory, StringComparison.OrdinalIgnoreCase)) { category = null; } var newRule = new LoggerFilterRule(logger, category, level, null); options.Rules.Add(newRule); } } } private static bool TryGetSwitch(string value, out LogLevel level) { if (string.IsNullOrEmpty(value)) { level = LogLevel.None; return false; } else if (Enum.TryParse(value, true, out level)) { return true; } else { throw new InvalidOperationException(SR.Format(SR.ValueNotSupported, value)); } } } }
-1
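The LoggerFilterConfigureOptions record above maps "LogLevel" configuration keys onto LoggerFilterRule entries (a null category for "Default", and the parent section key used as the provider name). A small end-to-end sketch of the shape of configuration that code consumes follows; the keys, category names, and use of the in-memory provider are illustrative, and the snippet assumes the Microsoft.Extensions.Logging.Console, Microsoft.Extensions.Logging.Configuration, and Microsoft.Extensions.Configuration packages are referenced.

using System;
using System.Collections.Generic;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;

class LoggerFilterConfigSketch
{
    static void Main()
    {
        // In-memory stand-in for the "Logging" section of an appsettings.json file.
        var settings = new Dictionary<string, string?>
        {
            ["LogLevel:Default"] = "Information",  // becomes the rule with a null ("Default") category
            ["LogLevel:MyApp.Services"] = "Debug", // becomes a category-prefix rule
        };

        IConfiguration loggingConfig = new ConfigurationBuilder()
            .AddInMemoryCollection(settings)
            .Build();

        using ILoggerFactory factory = LoggerFactory.Create(builder =>
        {
            builder.AddConfiguration(loggingConfig); // wires these keys into LoggerFilterOptions
            builder.AddConsole();
        });

        ILogger logger = factory.CreateLogger("MyApp.Services.Worker");
        logger.LogDebug("Visible: the MyApp.Services rule lowers the threshold to Debug.");
        logger.LogTrace("Filtered out: Trace is below every configured level.");
    }
}

Passing the section to AddConfiguration is what routes it into the IConfigureOptions<LoggerFilterOptions> implementation shown in the record.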
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/tests/baseservices/threading/generics/TimerCallback/thread15.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Threading; interface IGen<T> { void Target(object p); T Dummy(T t); } class GenInt : IGen<int> { public int Dummy(int t) { return t; } public virtual void Target(object p) { if (Test_thread15.Xcounter>=Test_thread15.nThreads) { ManualResetEvent evt = (ManualResetEvent) p; evt.Set(); } else { Interlocked.Increment(ref Test_thread15.Xcounter); } } public static void ThreadPoolTest() { ManualResetEvent evt = new ManualResetEvent(false); IGen<int> obj = new GenInt(); TimerCallback tcb = new TimerCallback(obj.Target); Timer timer = new Timer(tcb,evt,Test_thread15.delay,Test_thread15.period); evt.WaitOne(); timer.Dispose(); Test_thread15.Eval(Test_thread15.Xcounter>=Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenDouble : IGen<double> { public double Dummy(double t) { return t; } public virtual void Target(object p) { if (Test_thread15.Xcounter>=Test_thread15.nThreads) { ManualResetEvent evt = (ManualResetEvent) p; evt.Set(); } else { Interlocked.Increment(ref Test_thread15.Xcounter); } } public static void ThreadPoolTest() { ManualResetEvent evt = new ManualResetEvent(false); IGen<double> obj = new GenDouble(); TimerCallback tcb = new TimerCallback(obj.Target); Timer timer = new Timer(tcb,evt,Test_thread15.delay,Test_thread15.period); evt.WaitOne(); timer.Dispose(); Test_thread15.Eval(Test_thread15.Xcounter>=Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenString : IGen<string> { public string Dummy(string t) { return t; } public virtual void Target(object p) { if (Test_thread15.Xcounter>=Test_thread15.nThreads) { ManualResetEvent evt = (ManualResetEvent) p; evt.Set(); } else { Interlocked.Increment(ref Test_thread15.Xcounter); } } public static void ThreadPoolTest() { ManualResetEvent evt = new ManualResetEvent(false); IGen<string> obj = new GenString(); TimerCallback tcb = new TimerCallback(obj.Target); Timer timer = new Timer(tcb,evt,Test_thread15.delay,Test_thread15.period); evt.WaitOne(); timer.Dispose(); Test_thread15.Eval(Test_thread15.Xcounter>=Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenObject : IGen<object> { public object Dummy(object t) { return t; } public virtual void Target(object p) { if (Test_thread15.Xcounter>=Test_thread15.nThreads) { ManualResetEvent evt = (ManualResetEvent) p; evt.Set(); } else { Interlocked.Increment(ref Test_thread15.Xcounter); } } public static void ThreadPoolTest() { ManualResetEvent evt = new ManualResetEvent(false); IGen<object> obj = new GenObject(); TimerCallback tcb = new TimerCallback(obj.Target); Timer timer = new Timer(tcb,evt,Test_thread15.delay,Test_thread15.period); evt.WaitOne(); timer.Dispose(); Test_thread15.Eval(Test_thread15.Xcounter>=Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenGuid : IGen<Guid> { public Guid Dummy(Guid t) { return t; } public virtual void Target(object p) { if (Test_thread15.Xcounter>=Test_thread15.nThreads) { ManualResetEvent evt = (ManualResetEvent) p; evt.Set(); } else { Interlocked.Increment(ref Test_thread15.Xcounter); } } public static void ThreadPoolTest() { ManualResetEvent evt = new ManualResetEvent(false); IGen<Guid> obj = new GenGuid(); TimerCallback tcb = new TimerCallback(obj.Target); Timer timer = new Timer(tcb,evt,Test_thread15.delay,Test_thread15.period); evt.WaitOne(); timer.Dispose(); Test_thread15.Eval(Test_thread15.Xcounter>=Test_thread15.nThreads); 
Test_thread15.Xcounter = 0; } } public class Test_thread15 { public static int delay = 0; public static int period = 2; public static int nThreads = 5; public static int counter = 0; public static int Xcounter = 0; public static bool result = true; public static void Eval(bool exp) { counter++; if (!exp) { result = exp; Console.WriteLine("Test Failed at location: " + counter); } } public static int Main() { GenInt.ThreadPoolTest(); GenDouble.ThreadPoolTest(); GenString.ThreadPoolTest(); GenObject.ThreadPoolTest(); GenGuid.ThreadPoolTest(); if (result) { Console.WriteLine("Test Passed"); return 100; } else { Console.WriteLine("Test Failed"); return 1; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Threading; interface IGen<T> { void Target(object p); T Dummy(T t); } class GenInt : IGen<int> { public int Dummy(int t) { return t; } public virtual void Target(object p) { if (Test_thread15.Xcounter>=Test_thread15.nThreads) { ManualResetEvent evt = (ManualResetEvent) p; evt.Set(); } else { Interlocked.Increment(ref Test_thread15.Xcounter); } } public static void ThreadPoolTest() { ManualResetEvent evt = new ManualResetEvent(false); IGen<int> obj = new GenInt(); TimerCallback tcb = new TimerCallback(obj.Target); Timer timer = new Timer(tcb,evt,Test_thread15.delay,Test_thread15.period); evt.WaitOne(); timer.Dispose(); Test_thread15.Eval(Test_thread15.Xcounter>=Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenDouble : IGen<double> { public double Dummy(double t) { return t; } public virtual void Target(object p) { if (Test_thread15.Xcounter>=Test_thread15.nThreads) { ManualResetEvent evt = (ManualResetEvent) p; evt.Set(); } else { Interlocked.Increment(ref Test_thread15.Xcounter); } } public static void ThreadPoolTest() { ManualResetEvent evt = new ManualResetEvent(false); IGen<double> obj = new GenDouble(); TimerCallback tcb = new TimerCallback(obj.Target); Timer timer = new Timer(tcb,evt,Test_thread15.delay,Test_thread15.period); evt.WaitOne(); timer.Dispose(); Test_thread15.Eval(Test_thread15.Xcounter>=Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenString : IGen<string> { public string Dummy(string t) { return t; } public virtual void Target(object p) { if (Test_thread15.Xcounter>=Test_thread15.nThreads) { ManualResetEvent evt = (ManualResetEvent) p; evt.Set(); } else { Interlocked.Increment(ref Test_thread15.Xcounter); } } public static void ThreadPoolTest() { ManualResetEvent evt = new ManualResetEvent(false); IGen<string> obj = new GenString(); TimerCallback tcb = new TimerCallback(obj.Target); Timer timer = new Timer(tcb,evt,Test_thread15.delay,Test_thread15.period); evt.WaitOne(); timer.Dispose(); Test_thread15.Eval(Test_thread15.Xcounter>=Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenObject : IGen<object> { public object Dummy(object t) { return t; } public virtual void Target(object p) { if (Test_thread15.Xcounter>=Test_thread15.nThreads) { ManualResetEvent evt = (ManualResetEvent) p; evt.Set(); } else { Interlocked.Increment(ref Test_thread15.Xcounter); } } public static void ThreadPoolTest() { ManualResetEvent evt = new ManualResetEvent(false); IGen<object> obj = new GenObject(); TimerCallback tcb = new TimerCallback(obj.Target); Timer timer = new Timer(tcb,evt,Test_thread15.delay,Test_thread15.period); evt.WaitOne(); timer.Dispose(); Test_thread15.Eval(Test_thread15.Xcounter>=Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenGuid : IGen<Guid> { public Guid Dummy(Guid t) { return t; } public virtual void Target(object p) { if (Test_thread15.Xcounter>=Test_thread15.nThreads) { ManualResetEvent evt = (ManualResetEvent) p; evt.Set(); } else { Interlocked.Increment(ref Test_thread15.Xcounter); } } public static void ThreadPoolTest() { ManualResetEvent evt = new ManualResetEvent(false); IGen<Guid> obj = new GenGuid(); TimerCallback tcb = new TimerCallback(obj.Target); Timer timer = new Timer(tcb,evt,Test_thread15.delay,Test_thread15.period); evt.WaitOne(); timer.Dispose(); Test_thread15.Eval(Test_thread15.Xcounter>=Test_thread15.nThreads); 
Test_thread15.Xcounter = 0; } } public class Test_thread15 { public static int delay = 0; public static int period = 2; public static int nThreads = 5; public static int counter = 0; public static int Xcounter = 0; public static bool result = true; public static void Eval(bool exp) { counter++; if (!exp) { result = exp; Console.WriteLine("Test Failed at location: " + counter); } } public static int Main() { GenInt.ThreadPoolTest(); GenDouble.ThreadPoolTest(); GenString.ThreadPoolTest(); GenObject.ThreadPoolTest(); GenGuid.ThreadPoolTest(); if (result) { Console.WriteLine("Test Passed"); return 100; } else { Console.WriteLine("Test Failed"); return 1; } } }
-1
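The record above stress-tests System.Threading.Timer through generic interface implementations, but the underlying pattern is small: a TimerCallback that counts invocations and signals a ManualResetEvent once enough have occurred. A stripped-down, non-generic sketch of that pattern follows; the constants mirror the delay/period/nThreads values in the test, while the class and field names are invented for this note.

using System;
using System.Threading;

class TimerCallbackSketch
{
    private static int s_ticks;

    static void Main()
    {
        var done = new ManualResetEvent(false);

        // Count invocations; once at least five have occurred, signal the event.
        TimerCallback callback = state =>
        {
            if (Interlocked.Increment(ref s_ticks) >= 5)
            {
                ((ManualResetEvent)state).Set();
            }
        };

        // dueTime 0 fires the first callback immediately; period 2 fires roughly every 2 ms after that.
        Timer timer = new Timer(callback, done, dueTime: 0, period: 2);

        done.WaitOne();
        timer.Dispose();

        // As in the record's test, the event is left for process teardown to clean up,
        // since a late in-flight callback may still try to signal it.
        Console.WriteLine($"Timer fired at least {s_ticks} times before being disposed.");
    }
}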
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/System.Configuration.ConfigurationManager/src/System/Configuration/Configuration.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Configuration.Internal; using System.IO; using System.Runtime.Versioning; namespace System.Configuration { // An instance of the Configuration class represents a single level // in the configuration hierarchy. Its contents can be edited and // saved to disk. // // It is not thread safe for writing. public sealed class Configuration { private readonly MgmtConfigurationRecord _configRecord; private readonly object[] _hostInitConfigurationParams; private readonly Type _typeConfigHost; private Func<string, string> _assemblyStringTransformer; private ContextInformation _evalContext; private ConfigurationLocationCollection _locations; private ConfigurationSectionGroup _rootSectionGroup; private Stack _sectionsStack; private Func<string, string> _typeStringTransformer; internal Configuration(string locationSubPath, Type typeConfigHost, params object[] hostInitConfigurationParams) { _typeConfigHost = typeConfigHost; _hostInitConfigurationParams = hostInitConfigurationParams; IInternalConfigHost configHost = (IInternalConfigHost)TypeUtil.CreateInstance(typeConfigHost); // Wrap the host with the UpdateConfigHost to support SaveAs. UpdateConfigHost updateConfigHost = new UpdateConfigHost(configHost); // Now wrap in ImplicitMachineConfigHost so we can stub in a simple machine.config if needed. IInternalConfigHost implicitMachineConfigHost = new ImplicitMachineConfigHost(updateConfigHost); InternalConfigRoot configRoot = new InternalConfigRoot(this, updateConfigHost); ((IInternalConfigRoot)configRoot).Init(implicitMachineConfigHost, isDesignTime: true); // Set the configuration paths for this Configuration. // // We do this in a separate step so that the WebConfigurationHost // can use this object's _configRoot to get the <sites> section, // which is used in it's MapPath implementation. string configPath, locationConfigPath; implicitMachineConfigHost.InitForConfiguration( ref locationSubPath, out configPath, out locationConfigPath, configRoot, hostInitConfigurationParams); if (!string.IsNullOrEmpty(locationSubPath) && !implicitMachineConfigHost.SupportsLocation) throw ExceptionUtil.UnexpectedError("Configuration::ctor"); if (string.IsNullOrEmpty(locationSubPath) != string.IsNullOrEmpty(locationConfigPath)) throw ExceptionUtil.UnexpectedError("Configuration::ctor"); // Get the configuration record for this config file. _configRecord = (MgmtConfigurationRecord)configRoot.GetConfigRecord(configPath); // Create another MgmtConfigurationRecord for the location that is a child of the above record. // Note that this does not match the resolution hiearchy that is used at runtime. if (!string.IsNullOrEmpty(locationSubPath)) { _configRecord = MgmtConfigurationRecord.Create( configRoot, _configRecord, locationConfigPath, locationSubPath); } // Throw if the config record we created contains global errors. _configRecord.ThrowIfInitErrors(); } public AppSettingsSection AppSettings => (AppSettingsSection)GetSection("appSettings"); public ConnectionStringsSection ConnectionStrings => (ConnectionStringsSection)GetSection("connectionStrings"); public string FilePath => _configRecord.ConfigurationFilePath; public bool HasFile => _configRecord.HasStream; public ConfigurationLocationCollection Locations => _locations ?? (_locations = _configRecord.GetLocationCollection(this)); public ContextInformation EvaluationContext => _evalContext ?? 
(_evalContext = new ContextInformation(_configRecord)); public ConfigurationSectionGroup RootSectionGroup { get { if (_rootSectionGroup == null) { _rootSectionGroup = new ConfigurationSectionGroup(); _rootSectionGroup.RootAttachToConfigurationRecord(_configRecord); } return _rootSectionGroup; } } public ConfigurationSectionCollection Sections => RootSectionGroup.Sections; public ConfigurationSectionGroupCollection SectionGroups => RootSectionGroup.SectionGroups; // Is the namespace declared in the file or not? // // ie. xmlns="http://schemas.microsoft.com/.NetConfiguration/v2.0" // (currently this is the only one we allow) public bool NamespaceDeclared { get { return _configRecord.NamespacePresent; } set { _configRecord.NamespacePresent = value; } } public Func<string, string> TypeStringTransformer { get { return _typeStringTransformer; } set { if (_typeStringTransformer != value) { TypeStringTransformerIsSet = value != null; _typeStringTransformer = value; } } } public Func<string, string> AssemblyStringTransformer { get { return _assemblyStringTransformer; } set { if (_assemblyStringTransformer != value) { AssemblyStringTransformerIsSet = value != null; _assemblyStringTransformer = value; } } } public FrameworkName TargetFramework { get; set; } internal bool TypeStringTransformerIsSet { get; private set; } internal bool AssemblyStringTransformerIsSet { get; private set; } internal Stack SectionsStack => _sectionsStack ?? (_sectionsStack = new Stack()); // Create a new instance of Configuration for the locationSubPath, // with the initialization parameters that were used to create this configuration. internal Configuration OpenLocationConfiguration(string locationSubPath) { return new Configuration(locationSubPath, _typeConfigHost, _hostInitConfigurationParams); } // public methods public ConfigurationSection GetSection(string sectionName) { ConfigurationSection section = (ConfigurationSection)_configRecord.GetSection(sectionName); return section; } public ConfigurationSectionGroup GetSectionGroup(string sectionGroupName) { ConfigurationSectionGroup sectionGroup = _configRecord.GetSectionGroup(sectionGroupName); return sectionGroup; } public void Save() { SaveAsImpl(null, ConfigurationSaveMode.Modified, false); } public void Save(ConfigurationSaveMode saveMode) { SaveAsImpl(null, saveMode, false); } public void Save(ConfigurationSaveMode saveMode, bool forceSaveAll) { SaveAsImpl(null, saveMode, forceSaveAll); } public void SaveAs(string filename) { SaveAs(filename, ConfigurationSaveMode.Modified, false); } public void SaveAs(string filename, ConfigurationSaveMode saveMode) { SaveAs(filename, saveMode, false); } public void SaveAs(string filename, ConfigurationSaveMode saveMode, bool forceSaveAll) { if (string.IsNullOrEmpty(filename)) throw ExceptionUtil.ParameterNullOrEmpty(nameof(filename)); SaveAsImpl(filename, saveMode, forceSaveAll); } private void SaveAsImpl(string filename, ConfigurationSaveMode saveMode, bool forceSaveAll) { filename = string.IsNullOrEmpty(filename) ? null : Path.GetFullPath(filename); if (forceSaveAll) ForceGroupsRecursive(RootSectionGroup); _configRecord.SaveAs(filename, saveMode, forceSaveAll); } // Force all sections and section groups to be instantiated. 
private void ForceGroupsRecursive(ConfigurationSectionGroup group) { foreach (ConfigurationSection configSection in group.Sections) { // Force the section to be read into the cache _ = group.Sections[configSection.SectionInformation.Name]; } foreach (ConfigurationSectionGroup sectionGroup in group.SectionGroups) ForceGroupsRecursive(group.SectionGroups[sectionGroup.Name]); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Configuration.Internal; using System.IO; using System.Runtime.Versioning; namespace System.Configuration { // An instance of the Configuration class represents a single level // in the configuration hierarchy. Its contents can be edited and // saved to disk. // // It is not thread safe for writing. public sealed class Configuration { private readonly MgmtConfigurationRecord _configRecord; private readonly object[] _hostInitConfigurationParams; private readonly Type _typeConfigHost; private Func<string, string> _assemblyStringTransformer; private ContextInformation _evalContext; private ConfigurationLocationCollection _locations; private ConfigurationSectionGroup _rootSectionGroup; private Stack _sectionsStack; private Func<string, string> _typeStringTransformer; internal Configuration(string locationSubPath, Type typeConfigHost, params object[] hostInitConfigurationParams) { _typeConfigHost = typeConfigHost; _hostInitConfigurationParams = hostInitConfigurationParams; IInternalConfigHost configHost = (IInternalConfigHost)TypeUtil.CreateInstance(typeConfigHost); // Wrap the host with the UpdateConfigHost to support SaveAs. UpdateConfigHost updateConfigHost = new UpdateConfigHost(configHost); // Now wrap in ImplicitMachineConfigHost so we can stub in a simple machine.config if needed. IInternalConfigHost implicitMachineConfigHost = new ImplicitMachineConfigHost(updateConfigHost); InternalConfigRoot configRoot = new InternalConfigRoot(this, updateConfigHost); ((IInternalConfigRoot)configRoot).Init(implicitMachineConfigHost, isDesignTime: true); // Set the configuration paths for this Configuration. // // We do this in a separate step so that the WebConfigurationHost // can use this object's _configRoot to get the <sites> section, // which is used in it's MapPath implementation. string configPath, locationConfigPath; implicitMachineConfigHost.InitForConfiguration( ref locationSubPath, out configPath, out locationConfigPath, configRoot, hostInitConfigurationParams); if (!string.IsNullOrEmpty(locationSubPath) && !implicitMachineConfigHost.SupportsLocation) throw ExceptionUtil.UnexpectedError("Configuration::ctor"); if (string.IsNullOrEmpty(locationSubPath) != string.IsNullOrEmpty(locationConfigPath)) throw ExceptionUtil.UnexpectedError("Configuration::ctor"); // Get the configuration record for this config file. _configRecord = (MgmtConfigurationRecord)configRoot.GetConfigRecord(configPath); // Create another MgmtConfigurationRecord for the location that is a child of the above record. // Note that this does not match the resolution hiearchy that is used at runtime. if (!string.IsNullOrEmpty(locationSubPath)) { _configRecord = MgmtConfigurationRecord.Create( configRoot, _configRecord, locationConfigPath, locationSubPath); } // Throw if the config record we created contains global errors. _configRecord.ThrowIfInitErrors(); } public AppSettingsSection AppSettings => (AppSettingsSection)GetSection("appSettings"); public ConnectionStringsSection ConnectionStrings => (ConnectionStringsSection)GetSection("connectionStrings"); public string FilePath => _configRecord.ConfigurationFilePath; public bool HasFile => _configRecord.HasStream; public ConfigurationLocationCollection Locations => _locations ?? (_locations = _configRecord.GetLocationCollection(this)); public ContextInformation EvaluationContext => _evalContext ?? 
(_evalContext = new ContextInformation(_configRecord)); public ConfigurationSectionGroup RootSectionGroup { get { if (_rootSectionGroup == null) { _rootSectionGroup = new ConfigurationSectionGroup(); _rootSectionGroup.RootAttachToConfigurationRecord(_configRecord); } return _rootSectionGroup; } } public ConfigurationSectionCollection Sections => RootSectionGroup.Sections; public ConfigurationSectionGroupCollection SectionGroups => RootSectionGroup.SectionGroups; // Is the namespace declared in the file or not? // // ie. xmlns="http://schemas.microsoft.com/.NetConfiguration/v2.0" // (currently this is the only one we allow) public bool NamespaceDeclared { get { return _configRecord.NamespacePresent; } set { _configRecord.NamespacePresent = value; } } public Func<string, string> TypeStringTransformer { get { return _typeStringTransformer; } set { if (_typeStringTransformer != value) { TypeStringTransformerIsSet = value != null; _typeStringTransformer = value; } } } public Func<string, string> AssemblyStringTransformer { get { return _assemblyStringTransformer; } set { if (_assemblyStringTransformer != value) { AssemblyStringTransformerIsSet = value != null; _assemblyStringTransformer = value; } } } public FrameworkName TargetFramework { get; set; } internal bool TypeStringTransformerIsSet { get; private set; } internal bool AssemblyStringTransformerIsSet { get; private set; } internal Stack SectionsStack => _sectionsStack ?? (_sectionsStack = new Stack()); // Create a new instance of Configuration for the locationSubPath, // with the initialization parameters that were used to create this configuration. internal Configuration OpenLocationConfiguration(string locationSubPath) { return new Configuration(locationSubPath, _typeConfigHost, _hostInitConfigurationParams); } // public methods public ConfigurationSection GetSection(string sectionName) { ConfigurationSection section = (ConfigurationSection)_configRecord.GetSection(sectionName); return section; } public ConfigurationSectionGroup GetSectionGroup(string sectionGroupName) { ConfigurationSectionGroup sectionGroup = _configRecord.GetSectionGroup(sectionGroupName); return sectionGroup; } public void Save() { SaveAsImpl(null, ConfigurationSaveMode.Modified, false); } public void Save(ConfigurationSaveMode saveMode) { SaveAsImpl(null, saveMode, false); } public void Save(ConfigurationSaveMode saveMode, bool forceSaveAll) { SaveAsImpl(null, saveMode, forceSaveAll); } public void SaveAs(string filename) { SaveAs(filename, ConfigurationSaveMode.Modified, false); } public void SaveAs(string filename, ConfigurationSaveMode saveMode) { SaveAs(filename, saveMode, false); } public void SaveAs(string filename, ConfigurationSaveMode saveMode, bool forceSaveAll) { if (string.IsNullOrEmpty(filename)) throw ExceptionUtil.ParameterNullOrEmpty(nameof(filename)); SaveAsImpl(filename, saveMode, forceSaveAll); } private void SaveAsImpl(string filename, ConfigurationSaveMode saveMode, bool forceSaveAll) { filename = string.IsNullOrEmpty(filename) ? null : Path.GetFullPath(filename); if (forceSaveAll) ForceGroupsRecursive(RootSectionGroup); _configRecord.SaveAs(filename, saveMode, forceSaveAll); } // Force all sections and section groups to be instantiated. 
private void ForceGroupsRecursive(ConfigurationSectionGroup group) { foreach (ConfigurationSection configSection in group.Sections) { // Force the section to be read into the cache _ = group.Sections[configSection.SectionInformation.Name]; } foreach (ConfigurationSectionGroup sectionGroup in group.SectionGroups) ForceGroupsRecursive(group.SectionGroups[sectionGroup.Name]); } } }
-1
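Configuration in the record above is the design-time object model that System.Configuration.ConfigurationManager hands back; application code usually reaches it through OpenExeConfiguration. A brief usage sketch follows — the setting name is invented, and the snippet assumes the System.Configuration.ConfigurationManager package (or .NET Framework's System.Configuration assembly) is referenced.

using System;
using System.Configuration;

class ConfigurationSketch
{
    static void Main()
    {
        // Open the config file for the current executable (e.g. MyApp.exe.config / App.config).
        Configuration config =
            ConfigurationManager.OpenExeConfiguration(ConfigurationUserLevel.None);

        Console.WriteLine($"Loaded from: {config.FilePath} (HasFile: {config.HasFile})");

        // AppSettings is the strongly typed view of the <appSettings> section.
        KeyValueConfigurationCollection settings = config.AppSettings.Settings;
        if (settings["LastRun"] == null)
        {
            settings.Add("LastRun", DateTime.UtcNow.ToString("O"));
        }
        else
        {
            settings["LastRun"].Value = DateTime.UtcNow.ToString("O");
        }

        // Persist only the sections that were actually modified.
        config.Save(ConfigurationSaveMode.Modified);
    }
}

Save(ConfigurationSaveMode.Modified) corresponds to the Save overloads the record's class exposes; SaveAs with a filename goes through the same SaveAsImpl path shown there.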
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/System.Reflection.MetadataLoadContext/src/System/Reflection/DefaultBinder.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Reflection; using System.Reflection.TypeLoading; using CultureInfo = System.Globalization.CultureInfo; namespace System { internal sealed partial class DefaultBinder : Binder { private readonly MetadataLoadContext _loader; private readonly Type? _objectType; internal DefaultBinder(MetadataLoadContext loader) { _loader = loader; _objectType = loader.TryGetCoreType(CoreType.Object); } private bool IsImplementedByMetadataLoadContext(Type type) => type is RoType roType && roType.Loader == _loader; // This method is passed a set of methods and must choose the best // fit. The methods all have the same number of arguments and the object // array args. On exit, this method will choice the best fit method // and coerce the args to match that method. By match, we mean all primitive // arguments are exact matchs and all object arguments are exact or subclasses // of the target. If the target OR is an interface, the object must implement // that interface. There are a couple of exceptions // thrown when a method cannot be returned. If no method matchs the args and // ArgumentException is thrown. If multiple methods match the args then // an AmbiguousMatchException is thrown. // // The most specific match will be selected. // public sealed override MethodBase BindToMethod( BindingFlags bindingAttr, MethodBase[] match, ref object?[] args, ParameterModifier[]? modifiers, CultureInfo? cultureInfo, string[]? names, out object state) => throw new InvalidOperationException(SR.Arg_InvalidOperation_Reflection); // Given a set of fields that match the base criteria, select a field. // if value is null then we have no way to select a field public sealed override FieldInfo BindToField(BindingFlags bindingAttr, FieldInfo[] match, object value, CultureInfo? cultureInfo) => throw new InvalidOperationException(SR.Arg_InvalidOperation_Reflection); // Given a set of methods that match the base criteria, select a method based upon an array of types. // This method should return null if no method matches the criteria. public sealed override MethodBase? SelectMethod(BindingFlags bindingAttr, MethodBase[] match, Type[] types, ParameterModifier[]? modifiers) { int i; int j; Type[] realTypes = new Type[types.Length]; for (i = 0; i < types.Length; i++) { realTypes[i] = types[i].UnderlyingSystemType; if (!(IsImplementedByMetadataLoadContext(realTypes[i]) || realTypes[i].IsSignatureType())) throw new ArgumentException(SR.Arg_MustBeType, nameof(types)); } types = realTypes; // We don't automatically jump out on exact match. if (match == null || match.Length == 0) throw new ArgumentException(SR.Arg_EmptyArray, nameof(match)); MethodBase[] candidates = (MethodBase[])match.Clone(); // Find all the methods that can be described by the types parameter. // Remove all of them that cannot. int curIdx = 0; for (i = 0; i < candidates.Length; i++) { ParameterInfo[] par = candidates[i].GetParametersNoCopy(); if (par.Length != types.Length) continue; for (j = 0; j < types.Length; j++) { Type pCls = par[j].ParameterType; if (types[j].MatchesParameterTypeExactly(par[j])) continue; if (pCls == _objectType) continue; Type? 
type = types[j]; if (type.IsSignatureType()) { if (!(candidates[i] is MethodInfo methodInfo)) break; type = type.TryResolveAgainstGenericMethod(methodInfo); if (type == null) break; } if (pCls.IsPrimitive) { if (!(IsImplementedByMetadataLoadContext(type.UnderlyingSystemType)) || !CanChangePrimitive(type.UnderlyingSystemType, pCls.UnderlyingSystemType)) break; } else { if (!pCls.IsAssignableFrom(type)) break; } } if (j == types.Length) candidates[curIdx++] = candidates[i]; } if (curIdx == 0) return null; if (curIdx == 1) return candidates[0]; // Walk all of the methods looking the most specific method to invoke int currentMin = 0; bool ambig = false; int[] paramOrder = new int[types.Length]; for (i = 0; i < types.Length; i++) paramOrder[i] = i; for (i = 1; i < curIdx; i++) { int newMin = FindMostSpecificMethod( candidates[currentMin], paramOrder, paramArrayType1: null, candidates[i], paramOrder, paramArrayType2: null, types, args: null); if (newMin == 0) { ambig = true; } else if (newMin == 2) { currentMin = i; ambig = false; } } if (ambig) throw new AmbiguousMatchException(); return candidates[currentMin]; } // Given a set of properties that match the base criteria, select one. public sealed override PropertyInfo? SelectProperty(BindingFlags bindingAttr, PropertyInfo[] match, Type? returnType, Type[]? indexes, ParameterModifier[]? modifiers) { // Allow a null indexes array. But if it is not null, every element must be non-null as well. if (indexes != null) { foreach (Type index in indexes) { if (index == null) throw new ArgumentNullException(nameof(indexes)); } } if (match == null || match.Length == 0) throw new ArgumentException(SR.Arg_EmptyArray, nameof(match)); PropertyInfo[] candidates = (PropertyInfo[])match.Clone(); int i, j = 0; // Find all the properties that can be described by type indexes parameter int curIdx = 0; int indexesLength = (indexes != null) ? 
indexes.Length : 0; for (i = 0; i < candidates.Length; i++) { if (indexes != null) { ParameterInfo[] par = candidates[i].GetIndexParameters(); if (par.Length != indexesLength) continue; for (j = 0; j < indexesLength; j++) { Type pCls = par[j].ParameterType; // If the classes exactly match continue if (pCls == indexes[j]) continue; if (pCls == _objectType) continue; if (pCls.IsPrimitive) { if (!(IsImplementedByMetadataLoadContext(indexes[j].UnderlyingSystemType)) || !CanChangePrimitive(indexes[j].UnderlyingSystemType, pCls.UnderlyingSystemType)) break; } else { if (!pCls.IsAssignableFrom(indexes[j])) break; } } } if (j == indexesLength) { if (returnType != null) { if (candidates[i].PropertyType.IsPrimitive) { if (!(IsImplementedByMetadataLoadContext(returnType.UnderlyingSystemType)) || !CanChangePrimitive(returnType.UnderlyingSystemType, candidates[i].PropertyType.UnderlyingSystemType)) continue; } else { if (!candidates[i].PropertyType.IsAssignableFrom(returnType)) continue; } } candidates[curIdx++] = candidates[i]; } } if (curIdx == 0) return null; if (curIdx == 1) return candidates[0]; // Walk all of the properties looking for the most specific method to invoke int currentMin = 0; bool ambig = false; int[] paramOrder = new int[indexesLength]; for (i = 0; i < indexesLength; i++) paramOrder[i] = i; for (i = 1; i < curIdx; i++) { Debug.Assert(returnType != null); int newMin = FindMostSpecificType(candidates[currentMin].PropertyType, candidates[i].PropertyType, returnType); if (newMin == 0 && indexes != null) newMin = FindMostSpecific( candidates[currentMin].GetIndexParameters(), paramOrder, paramArrayType1: null, candidates[i].GetIndexParameters(), paramOrder, paramArrayType2: null, indexes, args: null); if (newMin == 0) { newMin = FindMostSpecificProperty(candidates[currentMin], candidates[i]); if (newMin == 0) ambig = true; } if (newMin == 2) { ambig = false; currentMin = i; } } if (ambig) throw new AmbiguousMatchException(); return candidates[currentMin]; } // The default binder doesn't support any change type functionality. // This is because the default is built into the low level invoke code. public override object ChangeType(object value, Type type, CultureInfo? cultureInfo) => throw new InvalidOperationException(SR.Arg_InvalidOperation_Reflection); public sealed override void ReorderArgumentArray(ref object?[] args, object state) => throw new InvalidOperationException(SR.Arg_InvalidOperation_Reflection); // Return any exact bindings that may exist. (This method is not defined on the // Binder and is used by RuntimeType.) public static MethodBase? ExactBinding(MethodBase[] match!!, Type[] types, ParameterModifier[]? modifiers) { MethodBase[] aExactMatches = new MethodBase[match.Length]; int cExactMatches = 0; for (int i = 0; i < match.Length; i++) { ParameterInfo[] par = match[i].GetParametersNoCopy(); if (par.Length == 0) { continue; } int j; for (j = 0; j < types.Length; j++) { Type pCls = par[j].ParameterType; // If the classes exactly match continue if (!pCls.Equals(types[j])) break; } if (j < types.Length) continue; // Add the exact match to the array of exact matches. aExactMatches[cExactMatches] = match[i]; cExactMatches++; } if (cExactMatches == 0) return null; if (cExactMatches == 1) return aExactMatches[0]; return FindMostDerivedNewSlotMeth(aExactMatches, cExactMatches); } // Return any exact bindings that may exist. (This method is not defined on the // Binder and is used by RuntimeType.) public static PropertyInfo? ExactPropertyBinding(PropertyInfo[] match!!, Type? 
returnType, Type[]? types, ParameterModifier[]? modifiers) { PropertyInfo? bestMatch = null; int typesLength = (types != null) ? types.Length : 0; for (int i = 0; i < match.Length; i++) { ParameterInfo[] par = match[i].GetIndexParameters(); int j; for (j = 0; j < typesLength; j++) { Type pCls = par[j].ParameterType; // If the classes exactly match continue if (pCls != types![j]) break; } if (j < typesLength) continue; if (returnType != null && returnType != match[i].PropertyType) continue; if (bestMatch != null) throw new AmbiguousMatchException(); bestMatch = match[i]; } return bestMatch; } private static int FindMostSpecific(ParameterInfo[] p1, int[] paramOrder1, Type? paramArrayType1, ParameterInfo[] p2, int[] paramOrder2, Type? paramArrayType2, Type[] types, object[]? args) { // A method using params is always less specific than one not using params if (paramArrayType1 != null && paramArrayType2 == null) return 2; if (paramArrayType2 != null && paramArrayType1 == null) return 1; // now either p1 and p2 both use params or neither does. bool param1Less = false; bool param2Less = false; for (int i = 0; i < types.Length; i++) { if (args != null && args[i] == Type.Missing) continue; Type c1, c2; // If a param array is present, then either // the user re-ordered the parameters in which case // the argument to the param array is either an array // in which case the params is conceptually ignored and so paramArrayType1 == null // or the argument to the param array is a single element // in which case paramOrder[i] == p1.Length - 1 for that element // or the user did not re-order the parameters in which case // the paramOrder array could contain indexes larger than p.Length - 1 (see VSW 577286) // so any index >= p.Length - 1 is being put in the param array if (paramArrayType1 != null && paramOrder1[i] >= p1.Length - 1) c1 = paramArrayType1; else c1 = p1[paramOrder1[i]].ParameterType; if (paramArrayType2 != null && paramOrder2[i] >= p2.Length - 1) c2 = paramArrayType2; else c2 = p2[paramOrder2[i]].ParameterType; if (c1 == c2) continue; switch (FindMostSpecificType(c1, c2, types[i])) { case 0: return 0; case 1: param1Less = true; break; case 2: param2Less = true; break; } } // Two ways param1Less and param2Less can be equal: all the arguments are the // same they both equal false, otherwise there were things that both // were the most specific type on. if (param1Less == param2Less) { // If we cannot tell which is a better match based on parameter types (param1Less == param2Less), // let's see which one has the most matches without using the params array (the longer one wins). if (!param1Less && args != null) { if (p1.Length > p2.Length) { return 1; } else if (p2.Length > p1.Length) { return 2; } } return 0; } else { return (param1Less == true) ? 1 : 2; } } private static int FindMostSpecificType(Type c1, Type c2, Type t) { // If the two types are exact move on... 
if (c1 == c2) return 0; if (t.IsSignatureType()) { if (t.MatchesExactly(c1)) return 1; if (t.MatchesExactly(c2)) return 2; } else { if (c1 == t) return 1; if (c2 == t) return 2; } bool c1FromC2; bool c2FromC1; if (c1.IsByRef || c2.IsByRef) { if (c1.IsByRef && c2.IsByRef) { c1 = c1.GetElementType()!; c2 = c2.GetElementType()!; } else if (c1.IsByRef) { if (c1.GetElementType() == c2) return 2; c1 = c1.GetElementType()!; } else { if (c2.GetElementType() == c1) return 1; c2 = c2.GetElementType()!; } } if (c1.IsPrimitive && c2.IsPrimitive) { c1FromC2 = CanChangePrimitive(c2, c1); c2FromC1 = CanChangePrimitive(c1, c2); } else { c1FromC2 = c1.IsAssignableFrom(c2); c2FromC1 = c2.IsAssignableFrom(c1); } if (c1FromC2 == c2FromC1) return 0; if (c1FromC2) { return 2; } else { return 1; } } private static int FindMostSpecificMethod(MethodBase m1, int[] paramOrder1, Type? paramArrayType1, MethodBase m2, int[] paramOrder2, Type? paramArrayType2, Type[] types, object[]? args) { // Find the most specific method based on the parameters. int res = FindMostSpecific(m1.GetParametersNoCopy(), paramOrder1, paramArrayType1, m2.GetParametersNoCopy(), paramOrder2, paramArrayType2, types, args); // If the match was not ambigous then return the result. if (res != 0) return res; // Check to see if the methods have the exact same name and signature. if (CompareMethodSig(m1, m2)) { // Determine the depth of the declaring types for both methods. int hierarchyDepth1 = GetHierarchyDepth(m1.DeclaringType!); int hierarchyDepth2 = GetHierarchyDepth(m2.DeclaringType!); // The most derived method is the most specific one. if (hierarchyDepth1 == hierarchyDepth2) { return 0; } else if (hierarchyDepth1 < hierarchyDepth2) { return 2; } else { return 1; } } // The match is ambigous. return 0; } private static int FindMostSpecificProperty(PropertyInfo cur1, PropertyInfo cur2) { // Check to see if the fields have the same name. if (cur1.Name == cur2.Name) { int hierarchyDepth1 = GetHierarchyDepth(cur1.DeclaringType!); int hierarchyDepth2 = GetHierarchyDepth(cur2.DeclaringType!); if (hierarchyDepth1 == hierarchyDepth2) { return 0; } else if (hierarchyDepth1 < hierarchyDepth2) return 2; else return 1; } // The match is ambigous. return 0; } public static bool CompareMethodSig(MethodBase m1, MethodBase m2) { ParameterInfo[] params1 = m1.GetParametersNoCopy(); ParameterInfo[] params2 = m2.GetParametersNoCopy(); if (params1.Length != params2.Length) return false; int numParams = params1.Length; for (int i = 0; i < numParams; i++) { if (params1[i].ParameterType != params2[i].ParameterType) return false; } return true; } private static int GetHierarchyDepth(Type t) { int depth = 0; Type? currentType = t; do { depth++; currentType = currentType.BaseType; } while (currentType != null); return depth; } internal static MethodBase? FindMostDerivedNewSlotMeth(MethodBase[] match, int cMatches) { int deepestHierarchy = 0; MethodBase? methWithDeepestHierarchy = null; for (int i = 0; i < cMatches; i++) { // Calculate the depth of the hierarchy of the declaring type of the // current method. int currentHierarchyDepth = GetHierarchyDepth(match[i].DeclaringType!); // The two methods have the same name, signature, and hierarchy depth. // This can only happen if at least one is vararg or generic. if (currentHierarchyDepth == deepestHierarchy) { throw new AmbiguousMatchException(); } // Check to see if this method is on the most derived class. 
if (currentHierarchyDepth > deepestHierarchy) { deepestHierarchy = currentHierarchyDepth; methWithDeepestHierarchy = match[i]; } } return methWithDeepestHierarchy; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; using System.Reflection; using System.Reflection.TypeLoading; using CultureInfo = System.Globalization.CultureInfo; namespace System { internal sealed partial class DefaultBinder : Binder { private readonly MetadataLoadContext _loader; private readonly Type? _objectType; internal DefaultBinder(MetadataLoadContext loader) { _loader = loader; _objectType = loader.TryGetCoreType(CoreType.Object); } private bool IsImplementedByMetadataLoadContext(Type type) => type is RoType roType && roType.Loader == _loader; // This method is passed a set of methods and must choose the best // fit. The methods all have the same number of arguments and the object // array args. On exit, this method will choice the best fit method // and coerce the args to match that method. By match, we mean all primitive // arguments are exact matchs and all object arguments are exact or subclasses // of the target. If the target OR is an interface, the object must implement // that interface. There are a couple of exceptions // thrown when a method cannot be returned. If no method matchs the args and // ArgumentException is thrown. If multiple methods match the args then // an AmbiguousMatchException is thrown. // // The most specific match will be selected. // public sealed override MethodBase BindToMethod( BindingFlags bindingAttr, MethodBase[] match, ref object?[] args, ParameterModifier[]? modifiers, CultureInfo? cultureInfo, string[]? names, out object state) => throw new InvalidOperationException(SR.Arg_InvalidOperation_Reflection); // Given a set of fields that match the base criteria, select a field. // if value is null then we have no way to select a field public sealed override FieldInfo BindToField(BindingFlags bindingAttr, FieldInfo[] match, object value, CultureInfo? cultureInfo) => throw new InvalidOperationException(SR.Arg_InvalidOperation_Reflection); // Given a set of methods that match the base criteria, select a method based upon an array of types. // This method should return null if no method matches the criteria. public sealed override MethodBase? SelectMethod(BindingFlags bindingAttr, MethodBase[] match, Type[] types, ParameterModifier[]? modifiers) { int i; int j; Type[] realTypes = new Type[types.Length]; for (i = 0; i < types.Length; i++) { realTypes[i] = types[i].UnderlyingSystemType; if (!(IsImplementedByMetadataLoadContext(realTypes[i]) || realTypes[i].IsSignatureType())) throw new ArgumentException(SR.Arg_MustBeType, nameof(types)); } types = realTypes; // We don't automatically jump out on exact match. if (match == null || match.Length == 0) throw new ArgumentException(SR.Arg_EmptyArray, nameof(match)); MethodBase[] candidates = (MethodBase[])match.Clone(); // Find all the methods that can be described by the types parameter. // Remove all of them that cannot. int curIdx = 0; for (i = 0; i < candidates.Length; i++) { ParameterInfo[] par = candidates[i].GetParametersNoCopy(); if (par.Length != types.Length) continue; for (j = 0; j < types.Length; j++) { Type pCls = par[j].ParameterType; if (types[j].MatchesParameterTypeExactly(par[j])) continue; if (pCls == _objectType) continue; Type? 
type = types[j]; if (type.IsSignatureType()) { if (!(candidates[i] is MethodInfo methodInfo)) break; type = type.TryResolveAgainstGenericMethod(methodInfo); if (type == null) break; } if (pCls.IsPrimitive) { if (!(IsImplementedByMetadataLoadContext(type.UnderlyingSystemType)) || !CanChangePrimitive(type.UnderlyingSystemType, pCls.UnderlyingSystemType)) break; } else { if (!pCls.IsAssignableFrom(type)) break; } } if (j == types.Length) candidates[curIdx++] = candidates[i]; } if (curIdx == 0) return null; if (curIdx == 1) return candidates[0]; // Walk all of the methods looking the most specific method to invoke int currentMin = 0; bool ambig = false; int[] paramOrder = new int[types.Length]; for (i = 0; i < types.Length; i++) paramOrder[i] = i; for (i = 1; i < curIdx; i++) { int newMin = FindMostSpecificMethod( candidates[currentMin], paramOrder, paramArrayType1: null, candidates[i], paramOrder, paramArrayType2: null, types, args: null); if (newMin == 0) { ambig = true; } else if (newMin == 2) { currentMin = i; ambig = false; } } if (ambig) throw new AmbiguousMatchException(); return candidates[currentMin]; } // Given a set of properties that match the base criteria, select one. public sealed override PropertyInfo? SelectProperty(BindingFlags bindingAttr, PropertyInfo[] match, Type? returnType, Type[]? indexes, ParameterModifier[]? modifiers) { // Allow a null indexes array. But if it is not null, every element must be non-null as well. if (indexes != null) { foreach (Type index in indexes) { if (index == null) throw new ArgumentNullException(nameof(indexes)); } } if (match == null || match.Length == 0) throw new ArgumentException(SR.Arg_EmptyArray, nameof(match)); PropertyInfo[] candidates = (PropertyInfo[])match.Clone(); int i, j = 0; // Find all the properties that can be described by type indexes parameter int curIdx = 0; int indexesLength = (indexes != null) ? 
indexes.Length : 0; for (i = 0; i < candidates.Length; i++) { if (indexes != null) { ParameterInfo[] par = candidates[i].GetIndexParameters(); if (par.Length != indexesLength) continue; for (j = 0; j < indexesLength; j++) { Type pCls = par[j].ParameterType; // If the classes exactly match continue if (pCls == indexes[j]) continue; if (pCls == _objectType) continue; if (pCls.IsPrimitive) { if (!(IsImplementedByMetadataLoadContext(indexes[j].UnderlyingSystemType)) || !CanChangePrimitive(indexes[j].UnderlyingSystemType, pCls.UnderlyingSystemType)) break; } else { if (!pCls.IsAssignableFrom(indexes[j])) break; } } } if (j == indexesLength) { if (returnType != null) { if (candidates[i].PropertyType.IsPrimitive) { if (!(IsImplementedByMetadataLoadContext(returnType.UnderlyingSystemType)) || !CanChangePrimitive(returnType.UnderlyingSystemType, candidates[i].PropertyType.UnderlyingSystemType)) continue; } else { if (!candidates[i].PropertyType.IsAssignableFrom(returnType)) continue; } } candidates[curIdx++] = candidates[i]; } } if (curIdx == 0) return null; if (curIdx == 1) return candidates[0]; // Walk all of the properties looking for the most specific method to invoke int currentMin = 0; bool ambig = false; int[] paramOrder = new int[indexesLength]; for (i = 0; i < indexesLength; i++) paramOrder[i] = i; for (i = 1; i < curIdx; i++) { Debug.Assert(returnType != null); int newMin = FindMostSpecificType(candidates[currentMin].PropertyType, candidates[i].PropertyType, returnType); if (newMin == 0 && indexes != null) newMin = FindMostSpecific( candidates[currentMin].GetIndexParameters(), paramOrder, paramArrayType1: null, candidates[i].GetIndexParameters(), paramOrder, paramArrayType2: null, indexes, args: null); if (newMin == 0) { newMin = FindMostSpecificProperty(candidates[currentMin], candidates[i]); if (newMin == 0) ambig = true; } if (newMin == 2) { ambig = false; currentMin = i; } } if (ambig) throw new AmbiguousMatchException(); return candidates[currentMin]; } // The default binder doesn't support any change type functionality. // This is because the default is built into the low level invoke code. public override object ChangeType(object value, Type type, CultureInfo? cultureInfo) => throw new InvalidOperationException(SR.Arg_InvalidOperation_Reflection); public sealed override void ReorderArgumentArray(ref object?[] args, object state) => throw new InvalidOperationException(SR.Arg_InvalidOperation_Reflection); // Return any exact bindings that may exist. (This method is not defined on the // Binder and is used by RuntimeType.) public static MethodBase? ExactBinding(MethodBase[] match!!, Type[] types, ParameterModifier[]? modifiers) { MethodBase[] aExactMatches = new MethodBase[match.Length]; int cExactMatches = 0; for (int i = 0; i < match.Length; i++) { ParameterInfo[] par = match[i].GetParametersNoCopy(); if (par.Length == 0) { continue; } int j; for (j = 0; j < types.Length; j++) { Type pCls = par[j].ParameterType; // If the classes exactly match continue if (!pCls.Equals(types[j])) break; } if (j < types.Length) continue; // Add the exact match to the array of exact matches. aExactMatches[cExactMatches] = match[i]; cExactMatches++; } if (cExactMatches == 0) return null; if (cExactMatches == 1) return aExactMatches[0]; return FindMostDerivedNewSlotMeth(aExactMatches, cExactMatches); } // Return any exact bindings that may exist. (This method is not defined on the // Binder and is used by RuntimeType.) public static PropertyInfo? ExactPropertyBinding(PropertyInfo[] match!!, Type? 
returnType, Type[]? types, ParameterModifier[]? modifiers) { PropertyInfo? bestMatch = null; int typesLength = (types != null) ? types.Length : 0; for (int i = 0; i < match.Length; i++) { ParameterInfo[] par = match[i].GetIndexParameters(); int j; for (j = 0; j < typesLength; j++) { Type pCls = par[j].ParameterType; // If the classes exactly match continue if (pCls != types![j]) break; } if (j < typesLength) continue; if (returnType != null && returnType != match[i].PropertyType) continue; if (bestMatch != null) throw new AmbiguousMatchException(); bestMatch = match[i]; } return bestMatch; } private static int FindMostSpecific(ParameterInfo[] p1, int[] paramOrder1, Type? paramArrayType1, ParameterInfo[] p2, int[] paramOrder2, Type? paramArrayType2, Type[] types, object[]? args) { // A method using params is always less specific than one not using params if (paramArrayType1 != null && paramArrayType2 == null) return 2; if (paramArrayType2 != null && paramArrayType1 == null) return 1; // now either p1 and p2 both use params or neither does. bool param1Less = false; bool param2Less = false; for (int i = 0; i < types.Length; i++) { if (args != null && args[i] == Type.Missing) continue; Type c1, c2; // If a param array is present, then either // the user re-ordered the parameters in which case // the argument to the param array is either an array // in which case the params is conceptually ignored and so paramArrayType1 == null // or the argument to the param array is a single element // in which case paramOrder[i] == p1.Length - 1 for that element // or the user did not re-order the parameters in which case // the paramOrder array could contain indexes larger than p.Length - 1 (see VSW 577286) // so any index >= p.Length - 1 is being put in the param array if (paramArrayType1 != null && paramOrder1[i] >= p1.Length - 1) c1 = paramArrayType1; else c1 = p1[paramOrder1[i]].ParameterType; if (paramArrayType2 != null && paramOrder2[i] >= p2.Length - 1) c2 = paramArrayType2; else c2 = p2[paramOrder2[i]].ParameterType; if (c1 == c2) continue; switch (FindMostSpecificType(c1, c2, types[i])) { case 0: return 0; case 1: param1Less = true; break; case 2: param2Less = true; break; } } // Two ways param1Less and param2Less can be equal: all the arguments are the // same they both equal false, otherwise there were things that both // were the most specific type on. if (param1Less == param2Less) { // If we cannot tell which is a better match based on parameter types (param1Less == param2Less), // let's see which one has the most matches without using the params array (the longer one wins). if (!param1Less && args != null) { if (p1.Length > p2.Length) { return 1; } else if (p2.Length > p1.Length) { return 2; } } return 0; } else { return (param1Less == true) ? 1 : 2; } } private static int FindMostSpecificType(Type c1, Type c2, Type t) { // If the two types are exact move on... 
if (c1 == c2) return 0; if (t.IsSignatureType()) { if (t.MatchesExactly(c1)) return 1; if (t.MatchesExactly(c2)) return 2; } else { if (c1 == t) return 1; if (c2 == t) return 2; } bool c1FromC2; bool c2FromC1; if (c1.IsByRef || c2.IsByRef) { if (c1.IsByRef && c2.IsByRef) { c1 = c1.GetElementType()!; c2 = c2.GetElementType()!; } else if (c1.IsByRef) { if (c1.GetElementType() == c2) return 2; c1 = c1.GetElementType()!; } else { if (c2.GetElementType() == c1) return 1; c2 = c2.GetElementType()!; } } if (c1.IsPrimitive && c2.IsPrimitive) { c1FromC2 = CanChangePrimitive(c2, c1); c2FromC1 = CanChangePrimitive(c1, c2); } else { c1FromC2 = c1.IsAssignableFrom(c2); c2FromC1 = c2.IsAssignableFrom(c1); } if (c1FromC2 == c2FromC1) return 0; if (c1FromC2) { return 2; } else { return 1; } } private static int FindMostSpecificMethod(MethodBase m1, int[] paramOrder1, Type? paramArrayType1, MethodBase m2, int[] paramOrder2, Type? paramArrayType2, Type[] types, object[]? args) { // Find the most specific method based on the parameters. int res = FindMostSpecific(m1.GetParametersNoCopy(), paramOrder1, paramArrayType1, m2.GetParametersNoCopy(), paramOrder2, paramArrayType2, types, args); // If the match was not ambigous then return the result. if (res != 0) return res; // Check to see if the methods have the exact same name and signature. if (CompareMethodSig(m1, m2)) { // Determine the depth of the declaring types for both methods. int hierarchyDepth1 = GetHierarchyDepth(m1.DeclaringType!); int hierarchyDepth2 = GetHierarchyDepth(m2.DeclaringType!); // The most derived method is the most specific one. if (hierarchyDepth1 == hierarchyDepth2) { return 0; } else if (hierarchyDepth1 < hierarchyDepth2) { return 2; } else { return 1; } } // The match is ambigous. return 0; } private static int FindMostSpecificProperty(PropertyInfo cur1, PropertyInfo cur2) { // Check to see if the fields have the same name. if (cur1.Name == cur2.Name) { int hierarchyDepth1 = GetHierarchyDepth(cur1.DeclaringType!); int hierarchyDepth2 = GetHierarchyDepth(cur2.DeclaringType!); if (hierarchyDepth1 == hierarchyDepth2) { return 0; } else if (hierarchyDepth1 < hierarchyDepth2) return 2; else return 1; } // The match is ambigous. return 0; } public static bool CompareMethodSig(MethodBase m1, MethodBase m2) { ParameterInfo[] params1 = m1.GetParametersNoCopy(); ParameterInfo[] params2 = m2.GetParametersNoCopy(); if (params1.Length != params2.Length) return false; int numParams = params1.Length; for (int i = 0; i < numParams; i++) { if (params1[i].ParameterType != params2[i].ParameterType) return false; } return true; } private static int GetHierarchyDepth(Type t) { int depth = 0; Type? currentType = t; do { depth++; currentType = currentType.BaseType; } while (currentType != null); return depth; } internal static MethodBase? FindMostDerivedNewSlotMeth(MethodBase[] match, int cMatches) { int deepestHierarchy = 0; MethodBase? methWithDeepestHierarchy = null; for (int i = 0; i < cMatches; i++) { // Calculate the depth of the hierarchy of the declaring type of the // current method. int currentHierarchyDepth = GetHierarchyDepth(match[i].DeclaringType!); // The two methods have the same name, signature, and hierarchy depth. // This can only happen if at least one is vararg or generic. if (currentHierarchyDepth == deepestHierarchy) { throw new AmbiguousMatchException(); } // Check to see if this method is on the most derived class. 
if (currentHierarchyDepth > deepestHierarchy) { deepestHierarchy = currentHierarchyDepth; methWithDeepestHierarchy = match[i]; } } return methWithDeepestHierarchy; } } }
-1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/System.Text.Json/tests/System.Text.Json.Tests/JsonNode/ParseTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.IO; using System.Reflection; using System.Text.Json.Serialization.Tests; using Xunit; namespace System.Text.Json.Nodes.Tests { public static class ParseTests { [Fact] public static void Parse() { JsonObject jObject = JsonNode.Parse(JsonNodeTests.ExpectedDomJson).AsObject(); Assert.Equal("Hello!", jObject["MyString"].GetValue<string>()); Assert.Null(jObject["MyNull"]); Assert.False(jObject["MyBoolean"].GetValue<bool>()); Assert.Equal("ed957609-cdfe-412f-88c1-02daca1b4f51", jObject["MyGuid"].GetValue<string>()); Assert.IsType<JsonArray>(jObject["MyArray"]); Assert.IsType<JsonObject>(jObject["MyObject"]); Assert.Equal(43, jObject["MyInt"].GetValue<int>()); Assert.Equal<uint>(43, jObject["MyInt"].GetValue<uint>()); Assert.Equal(43, jObject["MyInt"].GetValue<long>()); Assert.Equal<ulong>(43, jObject["MyInt"].GetValue<ulong>()); Assert.Equal(43, jObject["MyInt"].GetValue<short>()); Assert.Equal<ushort>(43, jObject["MyInt"].GetValue<ushort>()); Assert.Equal(43, jObject["MyInt"].GetValue<byte>()); Assert.Equal(43, jObject["MyInt"].GetValue<sbyte>()); Assert.Equal(43, jObject["MyInt"].GetValue<decimal>()); Assert.Equal(43, jObject["MyInt"].GetValue<float>()); DateTime dt = JsonNode.Parse("\"2020-07-08T01:02:03\"").GetValue<DateTime>(); Assert.Equal(2020, dt.Year); Assert.Equal(7, dt.Month); Assert.Equal(8, dt.Day); Assert.Equal(1, dt.Hour); Assert.Equal(2, dt.Minute); Assert.Equal(3, dt.Second); DateTimeOffset dtOffset = JsonNode.Parse("\"2020-07-08T01:02:03+01:15\"").GetValue<DateTimeOffset>(); Assert.Equal(2020, dtOffset.Year); Assert.Equal(7, dtOffset.Month); Assert.Equal(8, dtOffset.Day); Assert.Equal(1, dtOffset.Hour); Assert.Equal(2, dtOffset.Minute); Assert.Equal(3, dtOffset.Second); Assert.Equal(new TimeSpan(1,15,0), dtOffset.Offset); } [Fact] public static void Parse_TryGetPropertyValue() { JsonObject jObject = JsonNode.Parse(JsonNodeTests.ExpectedDomJson).AsObject(); JsonNode? node; Assert.True(jObject.TryGetPropertyValue("MyString", out node)); Assert.Equal("Hello!", node.GetValue<string>()); Assert.True(jObject.TryGetPropertyValue("MyNull", out node)); Assert.Null(node); Assert.True(jObject.TryGetPropertyValue("MyBoolean", out node)); Assert.False(node.GetValue<bool>()); Assert.True(jObject.TryGetPropertyValue("MyArray", out node)); Assert.IsType<JsonArray>(node); Assert.True(jObject.TryGetPropertyValue("MyInt", out node)); Assert.Equal(43, node.GetValue<int>()); Assert.True(jObject.TryGetPropertyValue("MyDateTime", out node)); Assert.Equal("2020-07-08T00:00:00", node.GetValue<string>()); Assert.True(jObject.TryGetPropertyValue("MyGuid", out node)); Assert.Equal("ed957609-cdfe-412f-88c1-02daca1b4f51", node.AsValue().GetValue<Guid>().ToString()); Assert.True(jObject.TryGetPropertyValue("MyObject", out node)); Assert.IsType<JsonObject>(node); } [Fact] public static void Parse_TryGetValue() { Assert.True(JsonNode.Parse("\"Hello\"").AsValue().TryGetValue(out string? _)); Assert.True(JsonNode.Parse("true").AsValue().TryGetValue(out bool? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out byte? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out sbyte? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out short? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out ushort? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out int? 
_)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out uint? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out long? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out ulong? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out decimal? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out float? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out double? _)); Assert.True(JsonNode.Parse("\"2020-07-08T00:00:00\"").AsValue().TryGetValue(out DateTime? _)); Assert.True(JsonNode.Parse("\"ed957609-cdfe-412f-88c1-02daca1b4f51\"").AsValue().TryGetValue(out Guid? _)); Assert.True(JsonNode.Parse("\"2020-07-08T01:02:03+01:15\"").AsValue().TryGetValue(out DateTimeOffset? _)); JsonValue? jValue = JsonNode.Parse("\"Hello!\"").AsValue(); Assert.False(jValue.TryGetValue(out int _)); Assert.False(jValue.TryGetValue(out DateTime _)); Assert.False(jValue.TryGetValue(out DateTimeOffset _)); Assert.False(jValue.TryGetValue(out Guid _)); } [Fact] public static void Parse_Fail() { JsonObject jObject = JsonNode.Parse(JsonNodeTests.ExpectedDomJson).AsObject(); Assert.Throws<InvalidOperationException>(() => jObject["MyString"].GetValue<int>()); Assert.Throws<InvalidOperationException>(() => jObject["MyBoolean"].GetValue<int>()); Assert.Throws<InvalidOperationException>(() => jObject["MyGuid"].GetValue<int>()); Assert.Throws<InvalidOperationException>(() => jObject["MyInt"].GetValue<string>()); Assert.Throws<InvalidOperationException>(() => jObject["MyDateTime"].GetValue<int>()); Assert.Throws<InvalidOperationException>(() => jObject["MyObject"].GetValue<int>()); Assert.Throws<InvalidOperationException>(() => jObject["MyArray"].GetValue<int>()); } [Fact] public static void NullReference_Fail() { Assert.Throws<ArgumentNullException>(() => JsonSerializer.Deserialize<JsonNode>((string)null)); Assert.Throws<ArgumentNullException>(() => JsonNode.Parse((string)null)); Assert.Throws<ArgumentNullException>(() => JsonNode.Parse((Stream)null)); } [Fact] public static void NullLiteral() { Assert.Null(JsonSerializer.Deserialize<JsonNode>("null")); Assert.Null(JsonNode.Parse("null")); using (MemoryStream stream = new MemoryStream(Encoding.UTF8.GetBytes("null"))) { Assert.Null(JsonNode.Parse(stream)); } } [Fact] public static void InternalValueFields() { // Use reflection to inspect the internal state of the 3 fields that hold values. // There is not another way to verify, and using a debug watch causes nodes to be created. FieldInfo elementField = typeof(JsonObject).GetField("_jsonElement", BindingFlags.Instance | BindingFlags.NonPublic); Assert.NotNull(elementField); FieldInfo jsonDictionaryField = typeof(JsonObject).GetField("_dictionary", BindingFlags.Instance | BindingFlags.NonPublic); Assert.NotNull(jsonDictionaryField); Type jsonPropertyDictionaryType = typeof(JsonObject).Assembly.GetType("System.Text.Json.JsonPropertyDictionary`1"); Assert.NotNull(jsonPropertyDictionaryType); jsonPropertyDictionaryType = jsonPropertyDictionaryType.MakeGenericType(new Type[] { typeof(JsonNode) }); FieldInfo listField = jsonPropertyDictionaryType.GetField("_propertyList", BindingFlags.Instance | BindingFlags.NonPublic); Assert.NotNull(listField); FieldInfo dictionaryField = jsonPropertyDictionaryType.GetField("_propertyDictionary", BindingFlags.Instance | BindingFlags.NonPublic); Assert.NotNull(dictionaryField); using (MemoryStream stream = new MemoryStream(SimpleTestClass.s_data)) { // Only JsonElement is present. 
JsonNode node = JsonNode.Parse(stream); object jsonDictionary = jsonDictionaryField.GetValue(node); Assert.Null(jsonDictionary); // Value is null until converted from JsonElement. Assert.NotNull(elementField.GetValue(node)); Test(); // Cause the single JsonElement to expand into individual JsonElement nodes. Assert.Equal(1, node.AsObject()["MyInt16"].GetValue<int>()); Assert.Null(elementField.GetValue(node)); jsonDictionary = jsonDictionaryField.GetValue(node); Assert.NotNull(jsonDictionary); Assert.NotNull(listField.GetValue(jsonDictionary)); Assert.NotNull(dictionaryField.GetValue(jsonDictionary)); // The dictionary threshold was reached. Test(); void Test() { string actual = node.ToJsonString(); // Replace the escaped "+" sign used with DateTimeOffset. actual = actual.Replace("\\u002B", "+"); Assert.Equal(SimpleTestClass.s_json.StripWhitespace(), actual); } } } [Fact] public static void ReadSimpleObjectWithTrailingTrivia() { byte[] data = Encoding.UTF8.GetBytes(SimpleTestClass.s_json + " /* Multi\r\nLine Comment */\t"); using (MemoryStream stream = new MemoryStream(data)) { var options = new JsonDocumentOptions { CommentHandling = JsonCommentHandling.Skip }; JsonNode node = JsonNode.Parse(stream, nodeOptions: null, options); string actual = node.ToJsonString(); // Replace the escaped "+" sign used with DateTimeOffset. actual = actual.Replace("\\u002B", "+"); Assert.Equal(SimpleTestClass.s_json.StripWhitespace(), actual); } } [Fact] public static void ReadPrimitives() { using (MemoryStream stream = new MemoryStream(Encoding.UTF8.GetBytes(@"1"))) { int i = JsonNode.Parse(stream).AsValue().GetValue<int>(); Assert.Equal(1, i); } } [Fact] public static void ParseThenEdit() { const string Expected = "{\"MyString\":null,\"Node\":42,\"Array\":[43],\"Value\":44,\"IntValue\":45,\"Object\":{\"Property\":46}}"; JsonNode node = JsonNode.Parse(Expected); Assert.Equal(Expected, node.ToJsonString()); // Change a primitive node["IntValue"] = 1; const string ExpectedAfterEdit1 = "{\"MyString\":null,\"Node\":42,\"Array\":[43],\"Value\":44,\"IntValue\":1,\"Object\":{\"Property\":46}}"; Assert.Equal(ExpectedAfterEdit1, node.ToJsonString()); // Change element node["Array"][0] = 2; const string ExpectedAfterEdit2 = "{\"MyString\":null,\"Node\":42,\"Array\":[2],\"Value\":44,\"IntValue\":1,\"Object\":{\"Property\":46}}"; Assert.Equal(ExpectedAfterEdit2, node.ToJsonString()); // Change property node["MyString"] = "3"; const string ExpectedAfterEdit3 = "{\"MyString\":\"3\",\"Node\":42,\"Array\":[2],\"Value\":44,\"IntValue\":1,\"Object\":{\"Property\":46}}"; Assert.Equal(ExpectedAfterEdit3, node.ToJsonString()); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.IO; using System.Reflection; using System.Text.Json.Serialization.Tests; using Xunit; namespace System.Text.Json.Nodes.Tests { public static class ParseTests { [Fact] public static void Parse() { JsonObject jObject = JsonNode.Parse(JsonNodeTests.ExpectedDomJson).AsObject(); Assert.Equal("Hello!", jObject["MyString"].GetValue<string>()); Assert.Null(jObject["MyNull"]); Assert.False(jObject["MyBoolean"].GetValue<bool>()); Assert.Equal("ed957609-cdfe-412f-88c1-02daca1b4f51", jObject["MyGuid"].GetValue<string>()); Assert.IsType<JsonArray>(jObject["MyArray"]); Assert.IsType<JsonObject>(jObject["MyObject"]); Assert.Equal(43, jObject["MyInt"].GetValue<int>()); Assert.Equal<uint>(43, jObject["MyInt"].GetValue<uint>()); Assert.Equal(43, jObject["MyInt"].GetValue<long>()); Assert.Equal<ulong>(43, jObject["MyInt"].GetValue<ulong>()); Assert.Equal(43, jObject["MyInt"].GetValue<short>()); Assert.Equal<ushort>(43, jObject["MyInt"].GetValue<ushort>()); Assert.Equal(43, jObject["MyInt"].GetValue<byte>()); Assert.Equal(43, jObject["MyInt"].GetValue<sbyte>()); Assert.Equal(43, jObject["MyInt"].GetValue<decimal>()); Assert.Equal(43, jObject["MyInt"].GetValue<float>()); DateTime dt = JsonNode.Parse("\"2020-07-08T01:02:03\"").GetValue<DateTime>(); Assert.Equal(2020, dt.Year); Assert.Equal(7, dt.Month); Assert.Equal(8, dt.Day); Assert.Equal(1, dt.Hour); Assert.Equal(2, dt.Minute); Assert.Equal(3, dt.Second); DateTimeOffset dtOffset = JsonNode.Parse("\"2020-07-08T01:02:03+01:15\"").GetValue<DateTimeOffset>(); Assert.Equal(2020, dtOffset.Year); Assert.Equal(7, dtOffset.Month); Assert.Equal(8, dtOffset.Day); Assert.Equal(1, dtOffset.Hour); Assert.Equal(2, dtOffset.Minute); Assert.Equal(3, dtOffset.Second); Assert.Equal(new TimeSpan(1,15,0), dtOffset.Offset); } [Fact] public static void Parse_TryGetPropertyValue() { JsonObject jObject = JsonNode.Parse(JsonNodeTests.ExpectedDomJson).AsObject(); JsonNode? node; Assert.True(jObject.TryGetPropertyValue("MyString", out node)); Assert.Equal("Hello!", node.GetValue<string>()); Assert.True(jObject.TryGetPropertyValue("MyNull", out node)); Assert.Null(node); Assert.True(jObject.TryGetPropertyValue("MyBoolean", out node)); Assert.False(node.GetValue<bool>()); Assert.True(jObject.TryGetPropertyValue("MyArray", out node)); Assert.IsType<JsonArray>(node); Assert.True(jObject.TryGetPropertyValue("MyInt", out node)); Assert.Equal(43, node.GetValue<int>()); Assert.True(jObject.TryGetPropertyValue("MyDateTime", out node)); Assert.Equal("2020-07-08T00:00:00", node.GetValue<string>()); Assert.True(jObject.TryGetPropertyValue("MyGuid", out node)); Assert.Equal("ed957609-cdfe-412f-88c1-02daca1b4f51", node.AsValue().GetValue<Guid>().ToString()); Assert.True(jObject.TryGetPropertyValue("MyObject", out node)); Assert.IsType<JsonObject>(node); } [Fact] public static void Parse_TryGetValue() { Assert.True(JsonNode.Parse("\"Hello\"").AsValue().TryGetValue(out string? _)); Assert.True(JsonNode.Parse("true").AsValue().TryGetValue(out bool? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out byte? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out sbyte? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out short? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out ushort? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out int? 
_)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out uint? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out long? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out ulong? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out decimal? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out float? _)); Assert.True(JsonNode.Parse("42").AsValue().TryGetValue(out double? _)); Assert.True(JsonNode.Parse("\"2020-07-08T00:00:00\"").AsValue().TryGetValue(out DateTime? _)); Assert.True(JsonNode.Parse("\"ed957609-cdfe-412f-88c1-02daca1b4f51\"").AsValue().TryGetValue(out Guid? _)); Assert.True(JsonNode.Parse("\"2020-07-08T01:02:03+01:15\"").AsValue().TryGetValue(out DateTimeOffset? _)); JsonValue? jValue = JsonNode.Parse("\"Hello!\"").AsValue(); Assert.False(jValue.TryGetValue(out int _)); Assert.False(jValue.TryGetValue(out DateTime _)); Assert.False(jValue.TryGetValue(out DateTimeOffset _)); Assert.False(jValue.TryGetValue(out Guid _)); } [Fact] public static void Parse_Fail() { JsonObject jObject = JsonNode.Parse(JsonNodeTests.ExpectedDomJson).AsObject(); Assert.Throws<InvalidOperationException>(() => jObject["MyString"].GetValue<int>()); Assert.Throws<InvalidOperationException>(() => jObject["MyBoolean"].GetValue<int>()); Assert.Throws<InvalidOperationException>(() => jObject["MyGuid"].GetValue<int>()); Assert.Throws<InvalidOperationException>(() => jObject["MyInt"].GetValue<string>()); Assert.Throws<InvalidOperationException>(() => jObject["MyDateTime"].GetValue<int>()); Assert.Throws<InvalidOperationException>(() => jObject["MyObject"].GetValue<int>()); Assert.Throws<InvalidOperationException>(() => jObject["MyArray"].GetValue<int>()); } [Fact] public static void NullReference_Fail() { Assert.Throws<ArgumentNullException>(() => JsonSerializer.Deserialize<JsonNode>((string)null)); Assert.Throws<ArgumentNullException>(() => JsonNode.Parse((string)null)); Assert.Throws<ArgumentNullException>(() => JsonNode.Parse((Stream)null)); } [Fact] public static void NullLiteral() { Assert.Null(JsonSerializer.Deserialize<JsonNode>("null")); Assert.Null(JsonNode.Parse("null")); using (MemoryStream stream = new MemoryStream(Encoding.UTF8.GetBytes("null"))) { Assert.Null(JsonNode.Parse(stream)); } } [Fact] public static void InternalValueFields() { // Use reflection to inspect the internal state of the 3 fields that hold values. // There is not another way to verify, and using a debug watch causes nodes to be created. FieldInfo elementField = typeof(JsonObject).GetField("_jsonElement", BindingFlags.Instance | BindingFlags.NonPublic); Assert.NotNull(elementField); FieldInfo jsonDictionaryField = typeof(JsonObject).GetField("_dictionary", BindingFlags.Instance | BindingFlags.NonPublic); Assert.NotNull(jsonDictionaryField); Type jsonPropertyDictionaryType = typeof(JsonObject).Assembly.GetType("System.Text.Json.JsonPropertyDictionary`1"); Assert.NotNull(jsonPropertyDictionaryType); jsonPropertyDictionaryType = jsonPropertyDictionaryType.MakeGenericType(new Type[] { typeof(JsonNode) }); FieldInfo listField = jsonPropertyDictionaryType.GetField("_propertyList", BindingFlags.Instance | BindingFlags.NonPublic); Assert.NotNull(listField); FieldInfo dictionaryField = jsonPropertyDictionaryType.GetField("_propertyDictionary", BindingFlags.Instance | BindingFlags.NonPublic); Assert.NotNull(dictionaryField); using (MemoryStream stream = new MemoryStream(SimpleTestClass.s_data)) { // Only JsonElement is present. 
JsonNode node = JsonNode.Parse(stream); object jsonDictionary = jsonDictionaryField.GetValue(node); Assert.Null(jsonDictionary); // Value is null until converted from JsonElement. Assert.NotNull(elementField.GetValue(node)); Test(); // Cause the single JsonElement to expand into individual JsonElement nodes. Assert.Equal(1, node.AsObject()["MyInt16"].GetValue<int>()); Assert.Null(elementField.GetValue(node)); jsonDictionary = jsonDictionaryField.GetValue(node); Assert.NotNull(jsonDictionary); Assert.NotNull(listField.GetValue(jsonDictionary)); Assert.NotNull(dictionaryField.GetValue(jsonDictionary)); // The dictionary threshold was reached. Test(); void Test() { string actual = node.ToJsonString(); // Replace the escaped "+" sign used with DateTimeOffset. actual = actual.Replace("\\u002B", "+"); Assert.Equal(SimpleTestClass.s_json.StripWhitespace(), actual); } } } [Fact] public static void ReadSimpleObjectWithTrailingTrivia() { byte[] data = Encoding.UTF8.GetBytes(SimpleTestClass.s_json + " /* Multi\r\nLine Comment */\t"); using (MemoryStream stream = new MemoryStream(data)) { var options = new JsonDocumentOptions { CommentHandling = JsonCommentHandling.Skip }; JsonNode node = JsonNode.Parse(stream, nodeOptions: null, options); string actual = node.ToJsonString(); // Replace the escaped "+" sign used with DateTimeOffset. actual = actual.Replace("\\u002B", "+"); Assert.Equal(SimpleTestClass.s_json.StripWhitespace(), actual); } } [Fact] public static void ReadPrimitives() { using (MemoryStream stream = new MemoryStream(Encoding.UTF8.GetBytes(@"1"))) { int i = JsonNode.Parse(stream).AsValue().GetValue<int>(); Assert.Equal(1, i); } } [Fact] public static void ParseThenEdit() { const string Expected = "{\"MyString\":null,\"Node\":42,\"Array\":[43],\"Value\":44,\"IntValue\":45,\"Object\":{\"Property\":46}}"; JsonNode node = JsonNode.Parse(Expected); Assert.Equal(Expected, node.ToJsonString()); // Change a primitive node["IntValue"] = 1; const string ExpectedAfterEdit1 = "{\"MyString\":null,\"Node\":42,\"Array\":[43],\"Value\":44,\"IntValue\":1,\"Object\":{\"Property\":46}}"; Assert.Equal(ExpectedAfterEdit1, node.ToJsonString()); // Change element node["Array"][0] = 2; const string ExpectedAfterEdit2 = "{\"MyString\":null,\"Node\":42,\"Array\":[2],\"Value\":44,\"IntValue\":1,\"Object\":{\"Property\":46}}"; Assert.Equal(ExpectedAfterEdit2, node.ToJsonString()); // Change property node["MyString"] = "3"; const string ExpectedAfterEdit3 = "{\"MyString\":\"3\",\"Node\":42,\"Array\":[2],\"Value\":44,\"IntValue\":1,\"Object\":{\"Property\":46}}"; Assert.Equal(ExpectedAfterEdit3, node.ToJsonString()); } } }
-1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/System.ComponentModel.TypeConverter/tests/ByteConvertersTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.ComponentModel.Design.Serialization; using System.Globalization; namespace System.ComponentModel.Tests { public class ByteConverterTests : BaseNumberConverterTests { public override TypeConverter Converter => new ByteConverter(); public override IEnumerable<ConvertTest> ConvertToTestData() { yield return ConvertTest.Valid((byte)1, "1"); yield return ConvertTest.Valid((byte)2, (byte)2, CultureInfo.InvariantCulture); yield return ConvertTest.Valid((byte)3, (float)3.0); yield return ConvertTest.CantConvertTo((byte)3, typeof(InstanceDescriptor)); yield return ConvertTest.CantConvertTo((byte)3, typeof(object)); } public override IEnumerable<ConvertTest> ConvertFromTestData() { yield return ConvertTest.Valid("1", (byte)1); yield return ConvertTest.Valid("#2", (byte)2); yield return ConvertTest.Valid(" #2 ", (byte)2); yield return ConvertTest.Valid("0x3", (byte)3); yield return ConvertTest.Valid("0X3", (byte)3); yield return ConvertTest.Valid(" 0X3 ", (byte)3); yield return ConvertTest.Valid("&h4", (byte)4); yield return ConvertTest.Valid("&H4", (byte)4); yield return ConvertTest.Valid(" &H4 ", (byte)4); yield return ConvertTest.Valid("+5", (byte)5); yield return ConvertTest.Valid(" +5 ", (byte)5); yield return ConvertTest.Valid("!1", (byte)1, new CustomPositiveSymbolCulture()); yield return ConvertTest.Throws<ArgumentException, Exception>("-1"); yield return ConvertTest.Throws<ArgumentException, Exception>("256"); foreach (ConvertTest test in base.ConvertFromTestData()) { yield return test; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.ComponentModel.Design.Serialization; using System.Globalization; namespace System.ComponentModel.Tests { public class ByteConverterTests : BaseNumberConverterTests { public override TypeConverter Converter => new ByteConverter(); public override IEnumerable<ConvertTest> ConvertToTestData() { yield return ConvertTest.Valid((byte)1, "1"); yield return ConvertTest.Valid((byte)2, (byte)2, CultureInfo.InvariantCulture); yield return ConvertTest.Valid((byte)3, (float)3.0); yield return ConvertTest.CantConvertTo((byte)3, typeof(InstanceDescriptor)); yield return ConvertTest.CantConvertTo((byte)3, typeof(object)); } public override IEnumerable<ConvertTest> ConvertFromTestData() { yield return ConvertTest.Valid("1", (byte)1); yield return ConvertTest.Valid("#2", (byte)2); yield return ConvertTest.Valid(" #2 ", (byte)2); yield return ConvertTest.Valid("0x3", (byte)3); yield return ConvertTest.Valid("0X3", (byte)3); yield return ConvertTest.Valid(" 0X3 ", (byte)3); yield return ConvertTest.Valid("&h4", (byte)4); yield return ConvertTest.Valid("&H4", (byte)4); yield return ConvertTest.Valid(" &H4 ", (byte)4); yield return ConvertTest.Valid("+5", (byte)5); yield return ConvertTest.Valid(" +5 ", (byte)5); yield return ConvertTest.Valid("!1", (byte)1, new CustomPositiveSymbolCulture()); yield return ConvertTest.Throws<ArgumentException, Exception>("-1"); yield return ConvertTest.Throws<ArgumentException, Exception>("256"); foreach (ConvertTest test in base.ConvertFromTestData()) { yield return test; } } } }
-1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/tests/JIT/jit64/hfa/main/testG/hfa_sd1G_r.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <!-- Set to 'Full' if the Debug? column is marked in the spreadsheet. Leave blank otherwise. --> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="hfa_testG.cs" /> <ProjectReference Include="..\dll\common.csproj" /> <ProjectReference Include="..\dll\hfa_simple_f64_managed.csproj" /> <ProjectReference Include="..\dll\CMakeLists.txt" /> <ProjectReference Include="..\dll\hfa_simple_f64_common.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <!-- Set to 'Full' if the Debug? column is marked in the spreadsheet. Leave blank otherwise. --> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="hfa_testG.cs" /> <ProjectReference Include="..\dll\common.csproj" /> <ProjectReference Include="..\dll\hfa_simple_f64_managed.csproj" /> <ProjectReference Include="..\dll\CMakeLists.txt" /> <ProjectReference Include="..\dll\hfa_simple_f64_common.csproj" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later, when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that would then enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't any and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer-loop tests, which source-generate our full regex corpus, caught a case where this was happening. A couple of fixes, either of which on its own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, the source generator consults the computed atomicity table to determine whether the rest of the source generation views the loop as atomic. If so, it emits an atomic rendering instead. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we now also recur into lookarounds. This change also removes some duplicated code for reducing lookarounds and renames some stale methods.
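Fix 2 above ("ending backtracking elimination") is easiest to picture as a walk down the right-hand edge of an atomic region. The sketch below is a deliberately simplified model of that idea on a toy node type; none of the names correspond to the real RegexNode internals, and it is not the PR's code:

using System;
using System.Collections.Generic;

// Toy model: walk the right-hand edge of an atomic region and mark whatever
// ends it as atomic, descending into lookarounds instead of stopping at them.
enum NodeKind { Concatenation, Loop, Lookaround, Character }

sealed class Node
{
    public NodeKind Kind;
    public bool IsAtomic;                        // true => emitter can skip backtracking code
    public List<Node> Children = new List<Node>();

    public Node(NodeKind kind, params Node[] children)
    {
        Kind = kind;
        Children.AddRange(children);
    }
}

static class EndingBacktrackingElimination
{
    public static void Run(Node node)
    {
        while (true)
        {
            switch (node.Kind)
            {
                case NodeKind.Loop:
                case NodeKind.Character:
                    node.IsAtomic = true;        // nothing after it can force a give-back
                    return;

                case NodeKind.Concatenation:
                    if (node.Children.Count == 0) return;
                    node = node.Children[node.Children.Count - 1]; // only the last child ends the region
                    continue;

                case NodeKind.Lookaround:
                    // Lookarounds are zero-width and hand nothing back to what follows,
                    // so recur into their body rather than treating them as opaque.
                    if (node.Children.Count == 0) return;
                    node = node.Children[0];
                    continue;

                default:
                    return;
            }
        }
    }
}

class Demo
{
    static void Main()
    {
        // A lookaround ending an atomic region: the loop inside it can be marked atomic too.
        var loop = new Node(NodeKind.Loop);
        var tree = new Node(NodeKind.Concatenation,
                            new Node(NodeKind.Character),
                            new Node(NodeKind.Lookaround, loop));

        EndingBacktrackingElimination.Run(tree);
        Console.WriteLine(loop.IsAtomic); // True
    }
}

The only part fix 2 adds is the Lookaround case: instead of stopping when the walk reaches a lookaround, it continues into the lookaround's body, so constructs that end the lookaround can also be marked atomic.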
./src/libraries/System.IO.Packaging/src/System/IO/Packaging/ZipPackage.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Xml; //Required for Content Type File manipulation using System.Diagnostics; using System.IO.Compression; using System.Diagnostics.CodeAnalysis; namespace System.IO.Packaging { /// <summary> /// ZipPackage is a specific implementation for the abstract Package /// class, corresponding to the Zip file format. /// This is a part of the Packaging Layer APIs. /// </summary> public sealed class ZipPackage : Package { #region Public Methods #region PackagePart Methods /// <summary> /// This method is for custom implementation for the underlying file format /// Adds a new item to the zip archive corresponding to the PackagePart in the package. /// </summary> /// <param name="partUri">PartName</param> /// <param name="contentType">Content type of the part</param> /// <param name="compressionOption">Compression option for this part</param> /// <returns></returns> /// <exception cref="ArgumentNullException">If partUri parameter is null</exception> /// <exception cref="ArgumentNullException">If contentType parameter is null</exception> /// <exception cref="ArgumentException">If partUri parameter does not conform to the valid partUri syntax</exception> /// <exception cref="ArgumentOutOfRangeException">If CompressionOption enumeration [compressionOption] does not have one of the valid values</exception> protected override PackagePart CreatePartCore(Uri partUri, string contentType, CompressionOption compressionOption) { //Validating the PartUri - this method will do the argument checking required for uri. partUri = PackUriHelper.ValidatePartUri(partUri); if (contentType == null) throw new ArgumentNullException(nameof(contentType)); Package.ThrowIfCompressionOptionInvalid(compressionOption); // Convert XPS CompressionOption to Zip CompressionMethodEnum. CompressionLevel level; GetZipCompressionMethodFromOpcCompressionOption(compressionOption, out level); // Create new Zip item. // We need to remove the leading "/" character at the beginning of the part name. // The partUri object must be a ValidatedPartUri string zipItemName = ((PackUriHelper.ValidatedPartUri)partUri).PartUriString.Substring(1); ZipArchiveEntry zipArchiveEntry = _zipArchive.CreateEntry(zipItemName, level); //Store the content type of this part in the content types stream. _contentTypeHelper.AddContentType((PackUriHelper.ValidatedPartUri)partUri, new ContentType(contentType), level); return new ZipPackagePart(this, zipArchiveEntry.Archive, zipArchiveEntry, _zipStreamManager, (PackUriHelper.ValidatedPartUri)partUri, contentType, compressionOption); } /// <summary> /// This method is for custom implementation specific to the file format. /// Returns the part after reading the actual physical bits. The method /// returns a null to indicate that the part corresponding to the specified /// Uri was not found in the container. /// This method does not throw an exception if a part does not exist. /// </summary> /// <param name="partUri"></param> /// <returns></returns> protected override PackagePart? GetPartCore(Uri partUri) { //Currently the design has two aspects which makes it possible to return //a null from this method - // 1. All the parts are loaded at Package.Open time and as such, this // method would not be invoked, unless the user is asking for - // i. 
a part that does not exist - we can safely return null // ii.a part(interleaved/non-interleaved) that was added to the // underlying package by some other means, and the user wants to // access the updated part. This is currently not possible as the // underlying zip i/o layer does not allow for FileShare.ReadWrite. // 2. Also, its not a straightforward task to determine if a new part was // added as we need to look for atomic as well as interleaved parts and // this has to be done in a case sensitive manner. So, effectively // we will have to go through the entire list of zip items to determine // if there are any updates. // If ever the design changes, then this method must be updated accordingly return null; } /// <summary> /// This method is for custom implementation specific to the file format. /// Deletes the part corresponding to the uri specified. Deleting a part that does not /// exists is not an error and so we do not throw an exception in that case. /// </summary> /// <param name="partUri"></param> /// <exception cref="ArgumentNullException">If partUri parameter is null</exception> /// <exception cref="ArgumentException">If partUri parameter does not conform to the valid partUri syntax</exception> protected override void DeletePartCore(Uri partUri) { //Validating the PartUri - this method will do the argument checking required for uri. partUri = PackUriHelper.ValidatePartUri(partUri); string partZipName = GetZipItemNameFromOpcName(PackUriHelper.GetStringForPartUri(partUri)); ZipArchiveEntry? zipArchiveEntry = _zipArchive.GetEntry(partZipName); if (zipArchiveEntry != null) { // Case of an atomic part. zipArchiveEntry.Delete(); } //Delete the content type for this part if it was specified as an override _contentTypeHelper.DeleteContentType((PackUriHelper.ValidatedPartUri)partUri); } /// <summary> /// This method is for custom implementation specific to the file format. /// This is the method that knows how to get the actual parts from the underlying /// zip archive. /// </summary> /// <remarks> /// <para> /// Some or all of the parts may be interleaved. The Part object for an interleaved part encapsulates /// the Uri of the proper part name and the ZipFileInfo of the initial piece. /// This function does not go through the extra work of checking piece naming validity /// throughout the package. /// </para> /// <para> /// This means that interleaved parts without an initial piece will be silently ignored. /// Other naming anomalies get caught at the Stream level when an I/O operation involves /// an anomalous or missing piece. /// </para> /// <para> /// This function reads directly from the underlying IO layer and is supposed to be called /// just once in the lifetime of a package (at init time). /// </para> /// </remarks> /// <returns>An array of ZipPackagePart.</returns> protected override PackagePart[] GetPartsCore() { List<PackagePart> parts = new List<PackagePart>(InitialPartListSize); // The list of files has to be searched linearly (1) to identify the content type // stream, and (2) to identify parts. System.Collections.ObjectModel.ReadOnlyCollection<ZipArchiveEntry> zipArchiveEntries = _zipArchive.Entries; // We have already identified the [ContentTypes].xml pieces if any are present during // the initialization of ZipPackage object // Record parts and ignored items. foreach (ZipArchiveEntry zipArchiveEntry in zipArchiveEntries) { //Returns false if - // a. its a content type item // b. items that have either a leading or trailing slash. 
if (IsZipItemValidOpcPartOrPiece(zipArchiveEntry.FullName)) { Uri partUri = new Uri(GetOpcNameFromZipItemName(zipArchiveEntry.FullName), UriKind.Relative); if (PackUriHelper.TryValidatePartUri(partUri, out PackUriHelper.ValidatedPartUri? validatedPartUri)) { ContentType? contentType = _contentTypeHelper.GetContentType(validatedPartUri); if (contentType != null) { // In case there was some redundancy between pieces and/or the atomic // part, it will be detected at this point because the part's Uri (which // is independent of interleaving) will already be in the dictionary. parts.Add(new ZipPackagePart(this, zipArchiveEntry.Archive, zipArchiveEntry, _zipStreamManager, validatedPartUri, contentType.ToString(), GetCompressionOptionFromZipFileInfo(zipArchiveEntry))); } } //If not valid part uri we can completely ignore this zip file item. Even if later someone adds //a new part, the corresponding zip item can never map to one of these items } // If IsZipItemValidOpcPartOrPiece returns false, it implies that either the zip file Item // starts or ends with a "/" and as such we can completely ignore this zip file item. Even if later // a new part gets added, its corresponding zip item cannot map to one of these items. } return parts.ToArray(); } #endregion PackagePart Methods #region Other Methods /// <summary> /// This method is for custom implementation corresponding to the underlying zip file format. /// </summary> protected override void FlushCore() { //Save the content type file to the archive. _contentTypeHelper.SaveToFile(); } /// <summary> /// Closes the underlying ZipArchive object for this container /// </summary> /// <param name="disposing">True if called during Dispose, false if called during Finalize</param> protected override void Dispose(bool disposing) { try { if (disposing) { if (_contentTypeHelper != null) { _contentTypeHelper.SaveToFile(); } if (_zipStreamManager != null) { _zipStreamManager.Dispose(); } if (_zipArchive != null) { _zipArchive.Dispose(); } // _containerStream may be opened given a file name, in which case it should be closed here. // _containerStream may be passed into the constructor, in which case, it should not be closed here. if (_shouldCloseContainerStream) { _containerStream.Dispose(); } else { } _containerStream = null!; } } finally { base.Dispose(disposing); } } #endregion Other Methods #endregion Public Methods #region Internal Constructors /// <summary> /// Internal constructor that is called by the OpenOnFile static method. /// </summary> /// <param name="path">File path to the container.</param> /// <param name="packageFileMode">Container is opened in the specified mode if possible</param> /// <param name="packageFileAccess">Container is opened with the specified access if possible</param> /// <param name="share">Container is opened with the specified share if possible</param> internal ZipPackage(string path, FileMode packageFileMode, FileAccess packageFileAccess, FileShare share) : base(packageFileAccess) { ZipArchive? zipArchive = null; ContentTypeHelper? 
contentTypeHelper; _packageFileMode = packageFileMode; _packageFileAccess = packageFileAccess; try { _containerStream = new FileStream(path, _packageFileMode, _packageFileAccess, share); _shouldCloseContainerStream = true; ZipArchiveMode zipArchiveMode = ZipArchiveMode.Update; if (packageFileAccess == FileAccess.Read) zipArchiveMode = ZipArchiveMode.Read; else if (packageFileAccess == FileAccess.Write) zipArchiveMode = ZipArchiveMode.Create; else if (packageFileAccess == FileAccess.ReadWrite) zipArchiveMode = ZipArchiveMode.Update; zipArchive = new ZipArchive(_containerStream, zipArchiveMode, true, Text.Encoding.UTF8); _zipStreamManager = new ZipStreamManager(zipArchive, _packageFileMode, _packageFileAccess); contentTypeHelper = new ContentTypeHelper(zipArchive, _packageFileMode, _packageFileAccess, _zipStreamManager); } catch { zipArchive?.Dispose(); _containerStream?.Dispose(); throw; } _zipArchive = zipArchive; _contentTypeHelper = contentTypeHelper; } /// <summary> /// Internal constructor that is called by the Open(Stream) static methods. /// </summary> /// <param name="s"></param> /// <param name="packageFileMode"></param> /// <param name="packageFileAccess"></param> internal ZipPackage(Stream s, FileMode packageFileMode, FileAccess packageFileAccess) : base(packageFileAccess) { ZipArchive? zipArchive = null; ContentTypeHelper? contentTypeHelper; _packageFileMode = packageFileMode; _packageFileAccess = packageFileAccess; try { if (s.CanSeek) { switch (packageFileMode) { case FileMode.Open: if (s.Length == 0) { throw new FileFormatException(SR.ZipZeroSizeFileIsNotValidArchive); } break; case FileMode.CreateNew: if (s.Length != 0) { throw new IOException(SR.CreateNewOnNonEmptyStream); } break; case FileMode.Create: if (s.Length != 0) { s.SetLength(0); // Discard existing data } break; } } ZipArchiveMode zipArchiveMode = ZipArchiveMode.Update; if (packageFileAccess == FileAccess.Read) zipArchiveMode = ZipArchiveMode.Read; else if (packageFileAccess == FileAccess.Write) zipArchiveMode = ZipArchiveMode.Create; else if (packageFileAccess == FileAccess.ReadWrite) zipArchiveMode = ZipArchiveMode.Update; zipArchive = new ZipArchive(s, zipArchiveMode, true, Text.Encoding.UTF8); _zipStreamManager = new ZipStreamManager(zipArchive, packageFileMode, packageFileAccess); contentTypeHelper = new ContentTypeHelper(zipArchive, packageFileMode, packageFileAccess, _zipStreamManager); } catch (InvalidDataException) { throw new FileFormatException(SR.FileContainsCorruptedData); } catch { if (zipArchive != null) { zipArchive.Dispose(); } throw; } _containerStream = s; _shouldCloseContainerStream = false; _zipArchive = zipArchive; _contentTypeHelper = contentTypeHelper; } #endregion Internal Constructors #region Internal Methods // More generic function than GetZipItemNameFromPartName. In particular, it will handle piece names. internal static string GetZipItemNameFromOpcName(string opcName) { Debug.Assert(opcName != null && opcName.Length > 0); return opcName.Substring(1); } // More generic function than GetPartNameFromZipItemName. In particular, it will handle piece names. internal static string GetOpcNameFromZipItemName(string zipItemName) { return string.Concat(ForwardSlashString, zipItemName); } // Convert from XPS CompressionOption to ZipFileInfo compression properties. 
internal static void GetZipCompressionMethodFromOpcCompressionOption( CompressionOption compressionOption, out CompressionLevel compressionLevel) { switch (compressionOption) { case CompressionOption.NotCompressed: { compressionLevel = CompressionLevel.NoCompression; } break; case CompressionOption.Normal: { compressionLevel = CompressionLevel.Optimal; } break; case CompressionOption.Maximum: { compressionLevel = CompressionLevel.Optimal; } break; case CompressionOption.Fast: { compressionLevel = CompressionLevel.Fastest; } break; case CompressionOption.SuperFast: { compressionLevel = CompressionLevel.Fastest; } break; // fall-through is not allowed default: { Debug.Fail("Encountered an invalid CompressionOption enum value"); goto case CompressionOption.NotCompressed; } } } #endregion Internal Methods internal FileMode PackageFileMode { get { return _packageFileMode; } } #region Private Methods //returns a boolean indicating if the underlying zip item is a valid metro part or piece // This mainly excludes the content type item, as well as entries with leading or trailing // slashes. private bool IsZipItemValidOpcPartOrPiece(string zipItemName) { Debug.Assert(zipItemName != null, "The parameter zipItemName should not be null"); //check if the zip item is the Content type item -case sensitive comparison // The following test will filter out an atomic content type file, with name // "[Content_Types].xml", as well as an interleaved one, with piece names such as // "[Content_Types].xml/[0].piece" or "[Content_Types].xml/[5].last.piece". if (zipItemName.StartsWith(ContentTypeHelper.ContentTypeFileName, StringComparison.OrdinalIgnoreCase)) return false; else { //Could be an empty zip folder //We decided to ignore zip items that contain a "/" as this could be a folder in a zip archive //Some of the tools support this and some don't. There is no way ensure that the zip item never have //a leading "/", although this is a requirement we impose on items created through our API //Therefore we ignore them at the packaging api level. if (zipItemName.StartsWith(ForwardSlashString, StringComparison.Ordinal)) return false; //This will ignore the folder entries found in the zip package created by some zip tool //PartNames ending with a "/" slash is also invalid so we are skipping these entries, //this will also prevent the PackUriHelper.CreatePartUri from throwing when it encounters a // partname ending with a "/" if (zipItemName.EndsWith(ForwardSlashString, StringComparison.Ordinal)) return false; else return true; } } // convert from Zip CompressionMethodEnum and DeflateOptionEnum to XPS CompressionOption private static CompressionOption GetCompressionOptionFromZipFileInfo(ZipArchiveEntry zipFileInfo) { // Note: we can't determine compression method / level from the ZipArchiveEntry. 
CompressionOption result = CompressionOption.Normal; return result; } #endregion Private Methods #region Private Members private const int InitialPartListSize = 50; private readonly ZipArchive _zipArchive; private Stream _containerStream; // stream we are opened in if Open(Stream) was called private readonly bool _shouldCloseContainerStream; private readonly ContentTypeHelper _contentTypeHelper; // manages the content types for all the parts in the container private readonly ZipStreamManager _zipStreamManager; // manages streams for all parts, avoiding opening streams multiple times private readonly FileAccess _packageFileAccess; private readonly FileMode _packageFileMode; private const string ForwardSlashString = "/"; //Required for creating a part name from a zip item name //IEqualityComparer for extensions private static readonly ExtensionEqualityComparer s_extensionEqualityComparer = new ExtensionEqualityComparer(); #endregion Private Members /// <summary> /// ExtensionComparer /// The Extensions are stored in the Default Dictionary in their original form, /// however they are compared in a normalized manner. /// Equivalence for extensions in the content type stream, should follow /// the same rules as extensions of partnames. Also, by the time this code is invoked, /// we have already validated, that the extension is in the correct format as per the /// part name rules.So we are simplifying the logic here to just convert the extensions /// to Upper invariant form and then compare them. /// </summary> private sealed class ExtensionEqualityComparer : IEqualityComparer<string> { bool IEqualityComparer<string>.Equals(string? extensionA, string? extensionB) { Debug.Assert(extensionA != null, "extension should not be null"); Debug.Assert(extensionB != null, "extension should not be null"); //Important Note: any change to this should be made in accordance //with the rules for comparing/normalizing partnames. //Refer to PackUriHelper.ValidatedPartUri.GetNormalizedPartUri method. //Currently normalization just involves upper-casing ASCII and hence the simplification. return (string.CompareOrdinal(extensionA.ToUpperInvariant(), extensionB.ToUpperInvariant()) == 0); } int IEqualityComparer<string>.GetHashCode(string extension) { Debug.Assert(extension != null, "extension should not be null"); //Important Note: any change to this should be made in accordance //with the rules for comparing/normalizing partnames. //Refer to PackUriHelper.ValidatedPartUri.GetNormalizedPartUri method. //Currently normalization just involves upper-casing ASCII and hence the simplification. return extension.ToUpperInvariant().GetHashCode(); } } /// <summary> /// This is a helper class that maintains the Content Types File related to /// this ZipPackage. /// </summary> private sealed class ContentTypeHelper { /// <summary> /// Initialize the object without uploading any information from the package. /// Complete initialization in read mode also involves calling ParseContentTypesFile /// to deserialize content type information. 
/// </summary> internal ContentTypeHelper(ZipArchive zipArchive, FileMode packageFileMode, FileAccess packageFileAccess, ZipStreamManager zipStreamManager) { _zipArchive = zipArchive; //initialized in the ZipPackage constructor _packageFileMode = packageFileMode; _packageFileAccess = packageFileAccess; _zipStreamManager = zipStreamManager; //initialized in the ZipPackage constructor // The extensions are stored in the default Dictionary in their original form , but they are compared // in a normalized manner using the ExtensionComparer. _defaultDictionary = new Dictionary<string, ContentType>(DefaultDictionaryInitialSize, s_extensionEqualityComparer); // Identify the content type file or files before identifying parts and piece sequences. // This is necessary because the name of the content type stream is not a part name and // the information it contains is needed to recognize valid parts. if (_zipArchive.Mode == ZipArchiveMode.Read || _zipArchive.Mode == ZipArchiveMode.Update) ParseContentTypesFile(_zipArchive.Entries); //No contents to persist to the disk - _dirty = false; //by default //Lazy initialize these members as required //_overrideDictionary - Overrides should be rare //_contentTypeFileInfo - We will either find an atomin part, or //_contentTypeStreamPieces - an interleaved part //_contentTypeStreamExists - defaults to false - not yet found } internal static string ContentTypeFileName { get { return ContentTypesFile; } } //Adds the Default entry if it is the first time we come across //the extension for the partUri, does nothing if the content type //corresponding to the default entry for the extension matches or //adds a override corresponding to this part and content type. //This call is made when a new part is being added to the package. // This method assumes the partUri is valid. internal void AddContentType(PackUriHelper.ValidatedPartUri partUri, ContentType contentType, CompressionLevel compressionLevel) { //save the compressionOption and deflateOption that should be used //to create the content type item later if (!_contentTypeStreamExists) { _cachedCompressionLevel = compressionLevel; } // Figure out whether the mapping matches a default entry, can be made into a new // default entry, or has to be entered as an override entry. bool foundMatchingDefault = false; string extension = partUri.PartUriExtension; // Need to create an override entry? if (extension.Length == 0 || (_defaultDictionary.ContainsKey(extension) && !(foundMatchingDefault = _defaultDictionary[extension].AreTypeAndSubTypeEqual(contentType)))) { AddOverrideElement(partUri, contentType); } // Else, either there is already a mapping from extension to contentType, // or one needs to be created. else if (!foundMatchingDefault) { AddDefaultElement(extension, contentType); } } //Returns the content type for the part, if present, else returns null. internal ContentType? GetContentType(PackUriHelper.ValidatedPartUri partUri) { //Step 1: Check if there is an override entry present corresponding to the //partUri provided. Override takes precedence over the default entries if (_overrideDictionary != null) { if (_overrideDictionary.ContainsKey(partUri)) return _overrideDictionary[partUri]; } //Step 2: Check if there is a default entry corresponding to the //extension of the partUri provided. 
string extension = partUri.PartUriExtension; if (_defaultDictionary.ContainsKey(extension)) return _defaultDictionary[extension]; //Step 3: If we did not find an entry in the override and the default //dictionaries, this is an error condition return null; } //Deletes the override entry corresponding to the partUri, if it exists internal void DeleteContentType(PackUriHelper.ValidatedPartUri partUri) { if (_overrideDictionary != null) { if (_overrideDictionary.Remove(partUri)) _dirty = true; } } internal void SaveToFile() { if (_dirty) { //Lazy init: Initialize when the first part is added. if (!_contentTypeStreamExists) { _contentTypeZipArchiveEntry = _zipArchive.CreateEntry(ContentTypesFile, _cachedCompressionLevel); _contentTypeStreamExists = true; } else { // delete and re-create entry for content part. When writing this, the stream will not truncate the content // if the XML is shorter than the existing content part. var contentTypefullName = _contentTypeZipArchiveEntry!.FullName; var thisArchive = _contentTypeZipArchiveEntry.Archive; _zipStreamManager.Close(_contentTypeZipArchiveEntry); _contentTypeZipArchiveEntry.Delete(); _contentTypeZipArchiveEntry = thisArchive.CreateEntry(contentTypefullName); } using (Stream s = _zipStreamManager.Open(_contentTypeZipArchiveEntry, _packageFileMode, FileAccess.ReadWrite)) { // use UTF-8 encoding by default using (XmlWriter writer = XmlWriter.Create(s, new XmlWriterSettings { Encoding = System.Text.Encoding.UTF8 })) { writer.WriteStartDocument(); // write root element tag - Types writer.WriteStartElement(TypesTagName, TypesNamespaceUri); // for each default entry foreach (string key in _defaultDictionary.Keys) { WriteDefaultElement(writer, key, _defaultDictionary[key]); } if (_overrideDictionary != null) { // for each override entry foreach (PackUriHelper.ValidatedPartUri key in _overrideDictionary.Keys) { WriteOverrideElement(writer, key, _overrideDictionary[key]); } } // end of Types tag writer.WriteEndElement(); // close the document writer.WriteEndDocument(); _dirty = false; } } } } [MemberNotNull(nameof(_overrideDictionary))] private void EnsureOverrideDictionary() { // The part Uris are stored in the Override Dictionary in their original form , but they are compared // in a normalized manner using the PartUriComparer if (_overrideDictionary == null) _overrideDictionary = new Dictionary<PackUriHelper.ValidatedPartUri, ContentType>(OverrideDictionaryInitialSize); } private void ParseContentTypesFile(System.Collections.ObjectModel.ReadOnlyCollection<ZipArchiveEntry> zipFiles) { // Find the content type stream, allowing for interleaving. Naming collisions // (as between an atomic and an interleaved part) will result in an exception being thrown. Stream? s = OpenContentTypeStream(zipFiles); // Allow non-existent content type stream. if (s == null) return; XmlReaderSettings xrs = new XmlReaderSettings(); xrs.IgnoreWhitespace = true; using (s) using (XmlReader reader = XmlReader.Create(s, xrs)) { //This method expects the reader to be in ReadState.Initial. //It will make the first read call. PackagingUtilities.PerformInitialReadAndVerifyEncoding(reader); //Note: After the previous method call the reader should be at the first tag in the markup. 
//MoveToContent - Skips over the following - ProcessingInstruction, DocumentType, Comment, Whitespace, or SignificantWhitespace //If the reader is currently at a content node then this function call is a no-op reader.MoveToContent(); // look for our root tag and namespace pair - ignore others in case of version changes // Make sure that the current node read is an Element if ((reader.NodeType == XmlNodeType.Element) && (reader.Depth == 0) && (string.CompareOrdinal(reader.NamespaceURI, TypesNamespaceUri) == 0) && (string.CompareOrdinal(reader.Name, TypesTagName) == 0)) { //There should be a namespace Attribute present at this level. //Also any other attribute on the <Types> tag is an error including xml: and xsi: attributes if (PackagingUtilities.GetNonXmlnsAttributeCount(reader) > 0) { throw new XmlException(SR.TypesTagHasExtraAttributes, null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); } // start tag encountered // now parse individual Default and Override tags while (reader.Read()) { //Skips over the following - ProcessingInstruction, DocumentType, Comment, Whitespace, or SignificantWhitespace //If the reader is currently at a content node then this function call is a no-op reader.MoveToContent(); //If MoveToContent() takes us to the end of the content if (reader.NodeType == XmlNodeType.None) continue; // Make sure that the current node read is an element // Currently we expect the Default and Override Tag at Depth 1 if (reader.NodeType == XmlNodeType.Element && reader.Depth == 1 && (string.CompareOrdinal(reader.NamespaceURI, TypesNamespaceUri) == 0) && (string.CompareOrdinal(reader.Name, DefaultTagName) == 0)) { ProcessDefaultTagAttributes(reader); } else if (reader.NodeType == XmlNodeType.Element && reader.Depth == 1 && (string.CompareOrdinal(reader.NamespaceURI, TypesNamespaceUri) == 0) && (string.CompareOrdinal(reader.Name, OverrideTagName) == 0)) { ProcessOverrideTagAttributes(reader); } else if (reader.NodeType == XmlNodeType.EndElement && reader.Depth == 0 && string.CompareOrdinal(reader.Name, TypesTagName) == 0) { continue; } else { throw new XmlException(SR.TypesXmlDoesNotMatchSchema, null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); } } } else { throw new XmlException(SR.TypesElementExpected, null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); } } } /// <summary> /// Find the content type stream, allowing for interleaving. Naming collisions /// (as between an atomic and an interleaved part) will result in an exception being thrown. /// Return null if no content type stream has been found. /// </summary> /// <remarks> /// The input array is lexicographically sorted /// </remarks> private Stream? OpenContentTypeStream(System.Collections.ObjectModel.ReadOnlyCollection<ZipArchiveEntry> zipFiles) { foreach (ZipArchiveEntry zipFileInfo in zipFiles) { if (zipFileInfo.Name.ToUpperInvariant().StartsWith(ContentTypesFileUpperInvariant, StringComparison.Ordinal)) { // Atomic name. if (zipFileInfo.Name.Length == ContentTypeFileName.Length) { // Record the file info. _contentTypeZipArchiveEntry = zipFileInfo; } } } // If an atomic file was found, open a stream on it. if (_contentTypeZipArchiveEntry != null) { _contentTypeStreamExists = true; return _zipStreamManager.Open(_contentTypeZipArchiveEntry, _packageFileMode, FileAccess.ReadWrite); } // No content type stream was found. 
return null; } // Process the attributes for the Default tag private void ProcessDefaultTagAttributes(XmlReader reader) { //There could be a namespace Attribute present at this level. //Also any other attribute on the <Default> tag is an error including xml: and xsi: attributes if (PackagingUtilities.GetNonXmlnsAttributeCount(reader) != 2) throw new XmlException(SR.DefaultTagDoesNotMatchSchema, null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); // get the required Extension and ContentType attributes string? extensionAttributeValue = reader.GetAttribute(ExtensionAttributeName); ValidateXmlAttribute(ExtensionAttributeName, extensionAttributeValue, DefaultTagName, reader); string? contentTypeAttributeValue = reader.GetAttribute(ContentTypeAttributeName); ThrowIfXmlAttributeMissing(ContentTypeAttributeName, contentTypeAttributeValue, DefaultTagName, reader); // The extensions are stored in the Default Dictionary in their original form , but they are compared // in a normalized manner using the ExtensionComparer. PackUriHelper.ValidatedPartUri temporaryUri = PackUriHelper.ValidatePartUri( new Uri(TemporaryPartNameWithoutExtension + extensionAttributeValue, UriKind.Relative)); _defaultDictionary.Add(temporaryUri.PartUriExtension, new ContentType(contentTypeAttributeValue!)); //Skip the EndElement for Default Tag if (!reader.IsEmptyElement) ProcessEndElement(reader, DefaultTagName); } // Process the attributes for the Default tag private void ProcessOverrideTagAttributes(XmlReader reader) { //There could be a namespace Attribute present at this level. //Also any other attribute on the <Override> tag is an error including xml: and xsi: attributes if (PackagingUtilities.GetNonXmlnsAttributeCount(reader) != 2) throw new XmlException(SR.OverrideTagDoesNotMatchSchema, null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); // get the required Extension and ContentType attributes string? partNameAttributeValue = reader.GetAttribute(PartNameAttributeName); ValidateXmlAttribute(PartNameAttributeName, partNameAttributeValue, OverrideTagName, reader); string? contentTypeAttributeValue = reader.GetAttribute(ContentTypeAttributeName); ThrowIfXmlAttributeMissing(ContentTypeAttributeName, contentTypeAttributeValue, OverrideTagName, reader); PackUriHelper.ValidatedPartUri partUri = PackUriHelper.ValidatePartUri(new Uri(partNameAttributeValue!, UriKind.Relative)); //Lazy initializing - ensure that the override dictionary has been initialized EnsureOverrideDictionary(); // The part Uris are stored in the Override Dictionary in their original form , but they are compared // in a normalized manner using PartUriComparer. 
_overrideDictionary.Add(partUri, new ContentType(contentTypeAttributeValue!)); //Skip the EndElement for Override Tag if (!reader.IsEmptyElement) ProcessEndElement(reader, OverrideTagName); } //If End element is present for Relationship then we process it private void ProcessEndElement(XmlReader reader, string elementName) { Debug.Assert(!reader.IsEmptyElement, "This method should only be called it the Relationship Element is not empty"); reader.Read(); //Skips over the following - ProcessingInstruction, DocumentType, Comment, Whitespace, or SignificantWhitespace reader.MoveToContent(); if (reader.NodeType == XmlNodeType.EndElement && string.CompareOrdinal(elementName, reader.LocalName) == 0) return; else throw new XmlException(SR.Format(SR.ElementIsNotEmptyElement, elementName), null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); } private void AddOverrideElement(PackUriHelper.ValidatedPartUri partUri, ContentType contentType) { //Delete any entry corresponding in the Override dictionary //corresponding to the PartUri for which the contentType is being added. //This is to compensate for dead override entries in the content types file. DeleteContentType(partUri); //Lazy initializing - ensure that the override dictionary has been initialized EnsureOverrideDictionary(); // The part Uris are stored in the Override Dictionary in their original form , but they are compared // in a normalized manner using PartUriComparer. _overrideDictionary.Add(partUri, contentType); _dirty = true; } private void AddDefaultElement(string extension, ContentType contentType) { // The extensions are stored in the Default Dictionary in their original form , but they are compared // in a normalized manner using the ExtensionComparer. _defaultDictionary.Add(extension, contentType); _dirty = true; } private void WriteOverrideElement(XmlWriter xmlWriter, PackUriHelper.ValidatedPartUri partUri, ContentType contentType) { xmlWriter.WriteStartElement(OverrideTagName); xmlWriter.WriteAttributeString(PartNameAttributeName, partUri.PartUriString); xmlWriter.WriteAttributeString(ContentTypeAttributeName, contentType.ToString()); xmlWriter.WriteEndElement(); } private void WriteDefaultElement(XmlWriter xmlWriter, string extension, ContentType contentType) { xmlWriter.WriteStartElement(DefaultTagName); xmlWriter.WriteAttributeString(ExtensionAttributeName, extension); xmlWriter.WriteAttributeString(ContentTypeAttributeName, contentType.ToString()); xmlWriter.WriteEndElement(); } //Validate if the required XML attribute is present and not an empty string private void ValidateXmlAttribute(string attributeName, string? attributeValue, string tagName, XmlReader reader) { ThrowIfXmlAttributeMissing(attributeName, attributeValue, tagName, reader); //Checking for empty attribute if (attributeValue!.Length == 0) throw new XmlException(SR.Format(SR.RequiredAttributeEmpty, tagName, attributeName), null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); } //Validate if the required Content type XML attribute is present //Content type of a part can be empty private void ThrowIfXmlAttributeMissing(string attributeName, string? attributeValue, string tagName, XmlReader reader) { if (attributeValue == null) throw new XmlException(SR.Format(SR.RequiredAttributeMissing, tagName, attributeName), null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); } private Dictionary<PackUriHelper.ValidatedPartUri, ContentType>? 
_overrideDictionary; private readonly Dictionary<string, ContentType> _defaultDictionary; private readonly ZipArchive _zipArchive; private readonly FileMode _packageFileMode; private readonly FileAccess _packageFileAccess; private readonly ZipStreamManager _zipStreamManager; private ZipArchiveEntry? _contentTypeZipArchiveEntry; private bool _contentTypeStreamExists; private bool _dirty; private CompressionLevel _cachedCompressionLevel; private const string ContentTypesFile = "[Content_Types].xml"; private const string ContentTypesFileUpperInvariant = "[CONTENT_TYPES].XML"; private const int DefaultDictionaryInitialSize = 16; private const int OverrideDictionaryInitialSize = 8; //Xml tag specific strings for the Content Type file private const string TypesNamespaceUri = "http://schemas.openxmlformats.org/package/2006/content-types"; private const string TypesTagName = "Types"; private const string DefaultTagName = "Default"; private const string ExtensionAttributeName = "Extension"; private const string ContentTypeAttributeName = "ContentType"; private const string OverrideTagName = "Override"; private const string PartNameAttributeName = "PartName"; private const string TemporaryPartNameWithoutExtension = "/tempfiles/sample."; } } }
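The before_content above (and the after_content that follows) is the complete System.IO.Packaging ZipPackage source. For orientation, this internal class is only ever reached through the public Package API; a minimal usage sketch follows, in which the file name, part name, and content type are made up for illustration:

using System;
using System.IO;
using System.IO.Packaging;

class PackagingDemo
{
    static void Main()
    {
        Uri partUri = PackUriHelper.CreatePartUri(new Uri("/content/data.xml", UriKind.Relative));

        // Create a new OPC (zip-based) package and add one part to it.
        using (Package package = Package.Open("demo.zip", FileMode.Create))
        {
            PackagePart part = package.CreatePart(partUri, "application/xml", CompressionOption.Normal);
            using (Stream s = part.GetStream())
            using (var writer = new StreamWriter(s))
            {
                writer.Write("<root>hello</root>");
            }
        }

        // Re-open the package read-only and read the part back.
        using (Package package = Package.Open("demo.zip", FileMode.Open, FileAccess.Read))
        {
            PackagePart part = package.GetPart(partUri);
            using (var reader = new StreamReader(part.GetStream()))
            {
                Console.WriteLine(reader.ReadToEnd());
            }
        }
    }
}

The content type passed to CreatePart is what the nested ContentTypeHelper shown above records and later serializes into the package's [Content_Types].xml entry.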
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Xml; //Required for Content Type File manipulation using System.Diagnostics; using System.IO.Compression; using System.Diagnostics.CodeAnalysis; namespace System.IO.Packaging { /// <summary> /// ZipPackage is a specific implementation for the abstract Package /// class, corresponding to the Zip file format. /// This is a part of the Packaging Layer APIs. /// </summary> public sealed class ZipPackage : Package { #region Public Methods #region PackagePart Methods /// <summary> /// This method is for custom implementation for the underlying file format /// Adds a new item to the zip archive corresponding to the PackagePart in the package. /// </summary> /// <param name="partUri">PartName</param> /// <param name="contentType">Content type of the part</param> /// <param name="compressionOption">Compression option for this part</param> /// <returns></returns> /// <exception cref="ArgumentNullException">If partUri parameter is null</exception> /// <exception cref="ArgumentNullException">If contentType parameter is null</exception> /// <exception cref="ArgumentException">If partUri parameter does not conform to the valid partUri syntax</exception> /// <exception cref="ArgumentOutOfRangeException">If CompressionOption enumeration [compressionOption] does not have one of the valid values</exception> protected override PackagePart CreatePartCore(Uri partUri, string contentType, CompressionOption compressionOption) { //Validating the PartUri - this method will do the argument checking required for uri. partUri = PackUriHelper.ValidatePartUri(partUri); if (contentType == null) throw new ArgumentNullException(nameof(contentType)); Package.ThrowIfCompressionOptionInvalid(compressionOption); // Convert XPS CompressionOption to Zip CompressionMethodEnum. CompressionLevel level; GetZipCompressionMethodFromOpcCompressionOption(compressionOption, out level); // Create new Zip item. // We need to remove the leading "/" character at the beginning of the part name. // The partUri object must be a ValidatedPartUri string zipItemName = ((PackUriHelper.ValidatedPartUri)partUri).PartUriString.Substring(1); ZipArchiveEntry zipArchiveEntry = _zipArchive.CreateEntry(zipItemName, level); //Store the content type of this part in the content types stream. _contentTypeHelper.AddContentType((PackUriHelper.ValidatedPartUri)partUri, new ContentType(contentType), level); return new ZipPackagePart(this, zipArchiveEntry.Archive, zipArchiveEntry, _zipStreamManager, (PackUriHelper.ValidatedPartUri)partUri, contentType, compressionOption); } /// <summary> /// This method is for custom implementation specific to the file format. /// Returns the part after reading the actual physical bits. The method /// returns a null to indicate that the part corresponding to the specified /// Uri was not found in the container. /// This method does not throw an exception if a part does not exist. /// </summary> /// <param name="partUri"></param> /// <returns></returns> protected override PackagePart? GetPartCore(Uri partUri) { //Currently the design has two aspects which makes it possible to return //a null from this method - // 1. All the parts are loaded at Package.Open time and as such, this // method would not be invoked, unless the user is asking for - // i. 
a part that does not exist - we can safely return null // ii.a part(interleaved/non-interleaved) that was added to the // underlying package by some other means, and the user wants to // access the updated part. This is currently not possible as the // underlying zip i/o layer does not allow for FileShare.ReadWrite. // 2. Also, its not a straightforward task to determine if a new part was // added as we need to look for atomic as well as interleaved parts and // this has to be done in a case sensitive manner. So, effectively // we will have to go through the entire list of zip items to determine // if there are any updates. // If ever the design changes, then this method must be updated accordingly return null; } /// <summary> /// This method is for custom implementation specific to the file format. /// Deletes the part corresponding to the uri specified. Deleting a part that does not /// exists is not an error and so we do not throw an exception in that case. /// </summary> /// <param name="partUri"></param> /// <exception cref="ArgumentNullException">If partUri parameter is null</exception> /// <exception cref="ArgumentException">If partUri parameter does not conform to the valid partUri syntax</exception> protected override void DeletePartCore(Uri partUri) { //Validating the PartUri - this method will do the argument checking required for uri. partUri = PackUriHelper.ValidatePartUri(partUri); string partZipName = GetZipItemNameFromOpcName(PackUriHelper.GetStringForPartUri(partUri)); ZipArchiveEntry? zipArchiveEntry = _zipArchive.GetEntry(partZipName); if (zipArchiveEntry != null) { // Case of an atomic part. zipArchiveEntry.Delete(); } //Delete the content type for this part if it was specified as an override _contentTypeHelper.DeleteContentType((PackUriHelper.ValidatedPartUri)partUri); } /// <summary> /// This method is for custom implementation specific to the file format. /// This is the method that knows how to get the actual parts from the underlying /// zip archive. /// </summary> /// <remarks> /// <para> /// Some or all of the parts may be interleaved. The Part object for an interleaved part encapsulates /// the Uri of the proper part name and the ZipFileInfo of the initial piece. /// This function does not go through the extra work of checking piece naming validity /// throughout the package. /// </para> /// <para> /// This means that interleaved parts without an initial piece will be silently ignored. /// Other naming anomalies get caught at the Stream level when an I/O operation involves /// an anomalous or missing piece. /// </para> /// <para> /// This function reads directly from the underlying IO layer and is supposed to be called /// just once in the lifetime of a package (at init time). /// </para> /// </remarks> /// <returns>An array of ZipPackagePart.</returns> protected override PackagePart[] GetPartsCore() { List<PackagePart> parts = new List<PackagePart>(InitialPartListSize); // The list of files has to be searched linearly (1) to identify the content type // stream, and (2) to identify parts. System.Collections.ObjectModel.ReadOnlyCollection<ZipArchiveEntry> zipArchiveEntries = _zipArchive.Entries; // We have already identified the [ContentTypes].xml pieces if any are present during // the initialization of ZipPackage object // Record parts and ignored items. foreach (ZipArchiveEntry zipArchiveEntry in zipArchiveEntries) { //Returns false if - // a. its a content type item // b. items that have either a leading or trailing slash. 
if (IsZipItemValidOpcPartOrPiece(zipArchiveEntry.FullName)) { Uri partUri = new Uri(GetOpcNameFromZipItemName(zipArchiveEntry.FullName), UriKind.Relative); if (PackUriHelper.TryValidatePartUri(partUri, out PackUriHelper.ValidatedPartUri? validatedPartUri)) { ContentType? contentType = _contentTypeHelper.GetContentType(validatedPartUri); if (contentType != null) { // In case there was some redundancy between pieces and/or the atomic // part, it will be detected at this point because the part's Uri (which // is independent of interleaving) will already be in the dictionary. parts.Add(new ZipPackagePart(this, zipArchiveEntry.Archive, zipArchiveEntry, _zipStreamManager, validatedPartUri, contentType.ToString(), GetCompressionOptionFromZipFileInfo(zipArchiveEntry))); } } //If not valid part uri we can completely ignore this zip file item. Even if later someone adds //a new part, the corresponding zip item can never map to one of these items } // If IsZipItemValidOpcPartOrPiece returns false, it implies that either the zip file Item // starts or ends with a "/" and as such we can completely ignore this zip file item. Even if later // a new part gets added, its corresponding zip item cannot map to one of these items. } return parts.ToArray(); } #endregion PackagePart Methods #region Other Methods /// <summary> /// This method is for custom implementation corresponding to the underlying zip file format. /// </summary> protected override void FlushCore() { //Save the content type file to the archive. _contentTypeHelper.SaveToFile(); } /// <summary> /// Closes the underlying ZipArchive object for this container /// </summary> /// <param name="disposing">True if called during Dispose, false if called during Finalize</param> protected override void Dispose(bool disposing) { try { if (disposing) { if (_contentTypeHelper != null) { _contentTypeHelper.SaveToFile(); } if (_zipStreamManager != null) { _zipStreamManager.Dispose(); } if (_zipArchive != null) { _zipArchive.Dispose(); } // _containerStream may be opened given a file name, in which case it should be closed here. // _containerStream may be passed into the constructor, in which case, it should not be closed here. if (_shouldCloseContainerStream) { _containerStream.Dispose(); } else { } _containerStream = null!; } } finally { base.Dispose(disposing); } } #endregion Other Methods #endregion Public Methods #region Internal Constructors /// <summary> /// Internal constructor that is called by the OpenOnFile static method. /// </summary> /// <param name="path">File path to the container.</param> /// <param name="packageFileMode">Container is opened in the specified mode if possible</param> /// <param name="packageFileAccess">Container is opened with the specified access if possible</param> /// <param name="share">Container is opened with the specified share if possible</param> internal ZipPackage(string path, FileMode packageFileMode, FileAccess packageFileAccess, FileShare share) : base(packageFileAccess) { ZipArchive? zipArchive = null; ContentTypeHelper? 
contentTypeHelper; _packageFileMode = packageFileMode; _packageFileAccess = packageFileAccess; try { _containerStream = new FileStream(path, _packageFileMode, _packageFileAccess, share); _shouldCloseContainerStream = true; ZipArchiveMode zipArchiveMode = ZipArchiveMode.Update; if (packageFileAccess == FileAccess.Read) zipArchiveMode = ZipArchiveMode.Read; else if (packageFileAccess == FileAccess.Write) zipArchiveMode = ZipArchiveMode.Create; else if (packageFileAccess == FileAccess.ReadWrite) zipArchiveMode = ZipArchiveMode.Update; zipArchive = new ZipArchive(_containerStream, zipArchiveMode, true, Text.Encoding.UTF8); _zipStreamManager = new ZipStreamManager(zipArchive, _packageFileMode, _packageFileAccess); contentTypeHelper = new ContentTypeHelper(zipArchive, _packageFileMode, _packageFileAccess, _zipStreamManager); } catch { zipArchive?.Dispose(); _containerStream?.Dispose(); throw; } _zipArchive = zipArchive; _contentTypeHelper = contentTypeHelper; } /// <summary> /// Internal constructor that is called by the Open(Stream) static methods. /// </summary> /// <param name="s"></param> /// <param name="packageFileMode"></param> /// <param name="packageFileAccess"></param> internal ZipPackage(Stream s, FileMode packageFileMode, FileAccess packageFileAccess) : base(packageFileAccess) { ZipArchive? zipArchive = null; ContentTypeHelper? contentTypeHelper; _packageFileMode = packageFileMode; _packageFileAccess = packageFileAccess; try { if (s.CanSeek) { switch (packageFileMode) { case FileMode.Open: if (s.Length == 0) { throw new FileFormatException(SR.ZipZeroSizeFileIsNotValidArchive); } break; case FileMode.CreateNew: if (s.Length != 0) { throw new IOException(SR.CreateNewOnNonEmptyStream); } break; case FileMode.Create: if (s.Length != 0) { s.SetLength(0); // Discard existing data } break; } } ZipArchiveMode zipArchiveMode = ZipArchiveMode.Update; if (packageFileAccess == FileAccess.Read) zipArchiveMode = ZipArchiveMode.Read; else if (packageFileAccess == FileAccess.Write) zipArchiveMode = ZipArchiveMode.Create; else if (packageFileAccess == FileAccess.ReadWrite) zipArchiveMode = ZipArchiveMode.Update; zipArchive = new ZipArchive(s, zipArchiveMode, true, Text.Encoding.UTF8); _zipStreamManager = new ZipStreamManager(zipArchive, packageFileMode, packageFileAccess); contentTypeHelper = new ContentTypeHelper(zipArchive, packageFileMode, packageFileAccess, _zipStreamManager); } catch (InvalidDataException) { throw new FileFormatException(SR.FileContainsCorruptedData); } catch { if (zipArchive != null) { zipArchive.Dispose(); } throw; } _containerStream = s; _shouldCloseContainerStream = false; _zipArchive = zipArchive; _contentTypeHelper = contentTypeHelper; } #endregion Internal Constructors #region Internal Methods // More generic function than GetZipItemNameFromPartName. In particular, it will handle piece names. internal static string GetZipItemNameFromOpcName(string opcName) { Debug.Assert(opcName != null && opcName.Length > 0); return opcName.Substring(1); } // More generic function than GetPartNameFromZipItemName. In particular, it will handle piece names. internal static string GetOpcNameFromZipItemName(string zipItemName) { return string.Concat(ForwardSlashString, zipItemName); } // Convert from XPS CompressionOption to ZipFileInfo compression properties. 
internal static void GetZipCompressionMethodFromOpcCompressionOption( CompressionOption compressionOption, out CompressionLevel compressionLevel) { switch (compressionOption) { case CompressionOption.NotCompressed: { compressionLevel = CompressionLevel.NoCompression; } break; case CompressionOption.Normal: { compressionLevel = CompressionLevel.Optimal; } break; case CompressionOption.Maximum: { compressionLevel = CompressionLevel.Optimal; } break; case CompressionOption.Fast: { compressionLevel = CompressionLevel.Fastest; } break; case CompressionOption.SuperFast: { compressionLevel = CompressionLevel.Fastest; } break; // fall-through is not allowed default: { Debug.Fail("Encountered an invalid CompressionOption enum value"); goto case CompressionOption.NotCompressed; } } } #endregion Internal Methods internal FileMode PackageFileMode { get { return _packageFileMode; } } #region Private Methods //returns a boolean indicating if the underlying zip item is a valid metro part or piece // This mainly excludes the content type item, as well as entries with leading or trailing // slashes. private bool IsZipItemValidOpcPartOrPiece(string zipItemName) { Debug.Assert(zipItemName != null, "The parameter zipItemName should not be null"); //check if the zip item is the Content type item -case sensitive comparison // The following test will filter out an atomic content type file, with name // "[Content_Types].xml", as well as an interleaved one, with piece names such as // "[Content_Types].xml/[0].piece" or "[Content_Types].xml/[5].last.piece". if (zipItemName.StartsWith(ContentTypeHelper.ContentTypeFileName, StringComparison.OrdinalIgnoreCase)) return false; else { //Could be an empty zip folder //We decided to ignore zip items that contain a "/" as this could be a folder in a zip archive //Some of the tools support this and some don't. There is no way ensure that the zip item never have //a leading "/", although this is a requirement we impose on items created through our API //Therefore we ignore them at the packaging api level. if (zipItemName.StartsWith(ForwardSlashString, StringComparison.Ordinal)) return false; //This will ignore the folder entries found in the zip package created by some zip tool //PartNames ending with a "/" slash is also invalid so we are skipping these entries, //this will also prevent the PackUriHelper.CreatePartUri from throwing when it encounters a // partname ending with a "/" if (zipItemName.EndsWith(ForwardSlashString, StringComparison.Ordinal)) return false; else return true; } } // convert from Zip CompressionMethodEnum and DeflateOptionEnum to XPS CompressionOption private static CompressionOption GetCompressionOptionFromZipFileInfo(ZipArchiveEntry zipFileInfo) { // Note: we can't determine compression method / level from the ZipArchiveEntry. 
CompressionOption result = CompressionOption.Normal; return result; } #endregion Private Methods #region Private Members private const int InitialPartListSize = 50; private readonly ZipArchive _zipArchive; private Stream _containerStream; // stream we are opened in if Open(Stream) was called private readonly bool _shouldCloseContainerStream; private readonly ContentTypeHelper _contentTypeHelper; // manages the content types for all the parts in the container private readonly ZipStreamManager _zipStreamManager; // manages streams for all parts, avoiding opening streams multiple times private readonly FileAccess _packageFileAccess; private readonly FileMode _packageFileMode; private const string ForwardSlashString = "/"; //Required for creating a part name from a zip item name //IEqualityComparer for extensions private static readonly ExtensionEqualityComparer s_extensionEqualityComparer = new ExtensionEqualityComparer(); #endregion Private Members /// <summary> /// ExtensionComparer /// The Extensions are stored in the Default Dictionary in their original form, /// however they are compared in a normalized manner. /// Equivalence for extensions in the content type stream, should follow /// the same rules as extensions of partnames. Also, by the time this code is invoked, /// we have already validated, that the extension is in the correct format as per the /// part name rules.So we are simplifying the logic here to just convert the extensions /// to Upper invariant form and then compare them. /// </summary> private sealed class ExtensionEqualityComparer : IEqualityComparer<string> { bool IEqualityComparer<string>.Equals(string? extensionA, string? extensionB) { Debug.Assert(extensionA != null, "extension should not be null"); Debug.Assert(extensionB != null, "extension should not be null"); //Important Note: any change to this should be made in accordance //with the rules for comparing/normalizing partnames. //Refer to PackUriHelper.ValidatedPartUri.GetNormalizedPartUri method. //Currently normalization just involves upper-casing ASCII and hence the simplification. return (string.CompareOrdinal(extensionA.ToUpperInvariant(), extensionB.ToUpperInvariant()) == 0); } int IEqualityComparer<string>.GetHashCode(string extension) { Debug.Assert(extension != null, "extension should not be null"); //Important Note: any change to this should be made in accordance //with the rules for comparing/normalizing partnames. //Refer to PackUriHelper.ValidatedPartUri.GetNormalizedPartUri method. //Currently normalization just involves upper-casing ASCII and hence the simplification. return extension.ToUpperInvariant().GetHashCode(); } } /// <summary> /// This is a helper class that maintains the Content Types File related to /// this ZipPackage. /// </summary> private sealed class ContentTypeHelper { /// <summary> /// Initialize the object without uploading any information from the package. /// Complete initialization in read mode also involves calling ParseContentTypesFile /// to deserialize content type information. 
/// </summary> internal ContentTypeHelper(ZipArchive zipArchive, FileMode packageFileMode, FileAccess packageFileAccess, ZipStreamManager zipStreamManager) { _zipArchive = zipArchive; //initialized in the ZipPackage constructor _packageFileMode = packageFileMode; _packageFileAccess = packageFileAccess; _zipStreamManager = zipStreamManager; //initialized in the ZipPackage constructor // The extensions are stored in the default Dictionary in their original form , but they are compared // in a normalized manner using the ExtensionComparer. _defaultDictionary = new Dictionary<string, ContentType>(DefaultDictionaryInitialSize, s_extensionEqualityComparer); // Identify the content type file or files before identifying parts and piece sequences. // This is necessary because the name of the content type stream is not a part name and // the information it contains is needed to recognize valid parts. if (_zipArchive.Mode == ZipArchiveMode.Read || _zipArchive.Mode == ZipArchiveMode.Update) ParseContentTypesFile(_zipArchive.Entries); //No contents to persist to the disk - _dirty = false; //by default //Lazy initialize these members as required //_overrideDictionary - Overrides should be rare //_contentTypeFileInfo - We will either find an atomin part, or //_contentTypeStreamPieces - an interleaved part //_contentTypeStreamExists - defaults to false - not yet found } internal static string ContentTypeFileName { get { return ContentTypesFile; } } //Adds the Default entry if it is the first time we come across //the extension for the partUri, does nothing if the content type //corresponding to the default entry for the extension matches or //adds a override corresponding to this part and content type. //This call is made when a new part is being added to the package. // This method assumes the partUri is valid. internal void AddContentType(PackUriHelper.ValidatedPartUri partUri, ContentType contentType, CompressionLevel compressionLevel) { //save the compressionOption and deflateOption that should be used //to create the content type item later if (!_contentTypeStreamExists) { _cachedCompressionLevel = compressionLevel; } // Figure out whether the mapping matches a default entry, can be made into a new // default entry, or has to be entered as an override entry. bool foundMatchingDefault = false; string extension = partUri.PartUriExtension; // Need to create an override entry? if (extension.Length == 0 || (_defaultDictionary.ContainsKey(extension) && !(foundMatchingDefault = _defaultDictionary[extension].AreTypeAndSubTypeEqual(contentType)))) { AddOverrideElement(partUri, contentType); } // Else, either there is already a mapping from extension to contentType, // or one needs to be created. else if (!foundMatchingDefault) { AddDefaultElement(extension, contentType); } } //Returns the content type for the part, if present, else returns null. internal ContentType? GetContentType(PackUriHelper.ValidatedPartUri partUri) { //Step 1: Check if there is an override entry present corresponding to the //partUri provided. Override takes precedence over the default entries if (_overrideDictionary != null) { if (_overrideDictionary.ContainsKey(partUri)) return _overrideDictionary[partUri]; } //Step 2: Check if there is a default entry corresponding to the //extension of the partUri provided. 
string extension = partUri.PartUriExtension; if (_defaultDictionary.ContainsKey(extension)) return _defaultDictionary[extension]; //Step 3: If we did not find an entry in the override and the default //dictionaries, this is an error condition return null; } //Deletes the override entry corresponding to the partUri, if it exists internal void DeleteContentType(PackUriHelper.ValidatedPartUri partUri) { if (_overrideDictionary != null) { if (_overrideDictionary.Remove(partUri)) _dirty = true; } } internal void SaveToFile() { if (_dirty) { //Lazy init: Initialize when the first part is added. if (!_contentTypeStreamExists) { _contentTypeZipArchiveEntry = _zipArchive.CreateEntry(ContentTypesFile, _cachedCompressionLevel); _contentTypeStreamExists = true; } else { // delete and re-create entry for content part. When writing this, the stream will not truncate the content // if the XML is shorter than the existing content part. var contentTypefullName = _contentTypeZipArchiveEntry!.FullName; var thisArchive = _contentTypeZipArchiveEntry.Archive; _zipStreamManager.Close(_contentTypeZipArchiveEntry); _contentTypeZipArchiveEntry.Delete(); _contentTypeZipArchiveEntry = thisArchive.CreateEntry(contentTypefullName); } using (Stream s = _zipStreamManager.Open(_contentTypeZipArchiveEntry, _packageFileMode, FileAccess.ReadWrite)) { // use UTF-8 encoding by default using (XmlWriter writer = XmlWriter.Create(s, new XmlWriterSettings { Encoding = System.Text.Encoding.UTF8 })) { writer.WriteStartDocument(); // write root element tag - Types writer.WriteStartElement(TypesTagName, TypesNamespaceUri); // for each default entry foreach (string key in _defaultDictionary.Keys) { WriteDefaultElement(writer, key, _defaultDictionary[key]); } if (_overrideDictionary != null) { // for each override entry foreach (PackUriHelper.ValidatedPartUri key in _overrideDictionary.Keys) { WriteOverrideElement(writer, key, _overrideDictionary[key]); } } // end of Types tag writer.WriteEndElement(); // close the document writer.WriteEndDocument(); _dirty = false; } } } } [MemberNotNull(nameof(_overrideDictionary))] private void EnsureOverrideDictionary() { // The part Uris are stored in the Override Dictionary in their original form , but they are compared // in a normalized manner using the PartUriComparer if (_overrideDictionary == null) _overrideDictionary = new Dictionary<PackUriHelper.ValidatedPartUri, ContentType>(OverrideDictionaryInitialSize); } private void ParseContentTypesFile(System.Collections.ObjectModel.ReadOnlyCollection<ZipArchiveEntry> zipFiles) { // Find the content type stream, allowing for interleaving. Naming collisions // (as between an atomic and an interleaved part) will result in an exception being thrown. Stream? s = OpenContentTypeStream(zipFiles); // Allow non-existent content type stream. if (s == null) return; XmlReaderSettings xrs = new XmlReaderSettings(); xrs.IgnoreWhitespace = true; using (s) using (XmlReader reader = XmlReader.Create(s, xrs)) { //This method expects the reader to be in ReadState.Initial. //It will make the first read call. PackagingUtilities.PerformInitialReadAndVerifyEncoding(reader); //Note: After the previous method call the reader should be at the first tag in the markup. 
//MoveToContent - Skips over the following - ProcessingInstruction, DocumentType, Comment, Whitespace, or SignificantWhitespace //If the reader is currently at a content node then this function call is a no-op reader.MoveToContent(); // look for our root tag and namespace pair - ignore others in case of version changes // Make sure that the current node read is an Element if ((reader.NodeType == XmlNodeType.Element) && (reader.Depth == 0) && (string.CompareOrdinal(reader.NamespaceURI, TypesNamespaceUri) == 0) && (string.CompareOrdinal(reader.Name, TypesTagName) == 0)) { //There should be a namespace Attribute present at this level. //Also any other attribute on the <Types> tag is an error including xml: and xsi: attributes if (PackagingUtilities.GetNonXmlnsAttributeCount(reader) > 0) { throw new XmlException(SR.TypesTagHasExtraAttributes, null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); } // start tag encountered // now parse individual Default and Override tags while (reader.Read()) { //Skips over the following - ProcessingInstruction, DocumentType, Comment, Whitespace, or SignificantWhitespace //If the reader is currently at a content node then this function call is a no-op reader.MoveToContent(); //If MoveToContent() takes us to the end of the content if (reader.NodeType == XmlNodeType.None) continue; // Make sure that the current node read is an element // Currently we expect the Default and Override Tag at Depth 1 if (reader.NodeType == XmlNodeType.Element && reader.Depth == 1 && (string.CompareOrdinal(reader.NamespaceURI, TypesNamespaceUri) == 0) && (string.CompareOrdinal(reader.Name, DefaultTagName) == 0)) { ProcessDefaultTagAttributes(reader); } else if (reader.NodeType == XmlNodeType.Element && reader.Depth == 1 && (string.CompareOrdinal(reader.NamespaceURI, TypesNamespaceUri) == 0) && (string.CompareOrdinal(reader.Name, OverrideTagName) == 0)) { ProcessOverrideTagAttributes(reader); } else if (reader.NodeType == XmlNodeType.EndElement && reader.Depth == 0 && string.CompareOrdinal(reader.Name, TypesTagName) == 0) { continue; } else { throw new XmlException(SR.TypesXmlDoesNotMatchSchema, null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); } } } else { throw new XmlException(SR.TypesElementExpected, null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); } } } /// <summary> /// Find the content type stream, allowing for interleaving. Naming collisions /// (as between an atomic and an interleaved part) will result in an exception being thrown. /// Return null if no content type stream has been found. /// </summary> /// <remarks> /// The input array is lexicographically sorted /// </remarks> private Stream? OpenContentTypeStream(System.Collections.ObjectModel.ReadOnlyCollection<ZipArchiveEntry> zipFiles) { foreach (ZipArchiveEntry zipFileInfo in zipFiles) { if (zipFileInfo.Name.ToUpperInvariant().StartsWith(ContentTypesFileUpperInvariant, StringComparison.Ordinal)) { // Atomic name. if (zipFileInfo.Name.Length == ContentTypeFileName.Length) { // Record the file info. _contentTypeZipArchiveEntry = zipFileInfo; } } } // If an atomic file was found, open a stream on it. if (_contentTypeZipArchiveEntry != null) { _contentTypeStreamExists = true; return _zipStreamManager.Open(_contentTypeZipArchiveEntry, _packageFileMode, FileAccess.ReadWrite); } // No content type stream was found. 
return null; } // Process the attributes for the Default tag private void ProcessDefaultTagAttributes(XmlReader reader) { //There could be a namespace Attribute present at this level. //Also any other attribute on the <Default> tag is an error including xml: and xsi: attributes if (PackagingUtilities.GetNonXmlnsAttributeCount(reader) != 2) throw new XmlException(SR.DefaultTagDoesNotMatchSchema, null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); // get the required Extension and ContentType attributes string? extensionAttributeValue = reader.GetAttribute(ExtensionAttributeName); ValidateXmlAttribute(ExtensionAttributeName, extensionAttributeValue, DefaultTagName, reader); string? contentTypeAttributeValue = reader.GetAttribute(ContentTypeAttributeName); ThrowIfXmlAttributeMissing(ContentTypeAttributeName, contentTypeAttributeValue, DefaultTagName, reader); // The extensions are stored in the Default Dictionary in their original form , but they are compared // in a normalized manner using the ExtensionComparer. PackUriHelper.ValidatedPartUri temporaryUri = PackUriHelper.ValidatePartUri( new Uri(TemporaryPartNameWithoutExtension + extensionAttributeValue, UriKind.Relative)); _defaultDictionary.Add(temporaryUri.PartUriExtension, new ContentType(contentTypeAttributeValue!)); //Skip the EndElement for Default Tag if (!reader.IsEmptyElement) ProcessEndElement(reader, DefaultTagName); } // Process the attributes for the Default tag private void ProcessOverrideTagAttributes(XmlReader reader) { //There could be a namespace Attribute present at this level. //Also any other attribute on the <Override> tag is an error including xml: and xsi: attributes if (PackagingUtilities.GetNonXmlnsAttributeCount(reader) != 2) throw new XmlException(SR.OverrideTagDoesNotMatchSchema, null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); // get the required Extension and ContentType attributes string? partNameAttributeValue = reader.GetAttribute(PartNameAttributeName); ValidateXmlAttribute(PartNameAttributeName, partNameAttributeValue, OverrideTagName, reader); string? contentTypeAttributeValue = reader.GetAttribute(ContentTypeAttributeName); ThrowIfXmlAttributeMissing(ContentTypeAttributeName, contentTypeAttributeValue, OverrideTagName, reader); PackUriHelper.ValidatedPartUri partUri = PackUriHelper.ValidatePartUri(new Uri(partNameAttributeValue!, UriKind.Relative)); //Lazy initializing - ensure that the override dictionary has been initialized EnsureOverrideDictionary(); // The part Uris are stored in the Override Dictionary in their original form , but they are compared // in a normalized manner using PartUriComparer. 
_overrideDictionary.Add(partUri, new ContentType(contentTypeAttributeValue!)); //Skip the EndElement for Override Tag if (!reader.IsEmptyElement) ProcessEndElement(reader, OverrideTagName); } //If End element is present for Relationship then we process it private void ProcessEndElement(XmlReader reader, string elementName) { Debug.Assert(!reader.IsEmptyElement, "This method should only be called it the Relationship Element is not empty"); reader.Read(); //Skips over the following - ProcessingInstruction, DocumentType, Comment, Whitespace, or SignificantWhitespace reader.MoveToContent(); if (reader.NodeType == XmlNodeType.EndElement && string.CompareOrdinal(elementName, reader.LocalName) == 0) return; else throw new XmlException(SR.Format(SR.ElementIsNotEmptyElement, elementName), null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); } private void AddOverrideElement(PackUriHelper.ValidatedPartUri partUri, ContentType contentType) { //Delete any entry corresponding in the Override dictionary //corresponding to the PartUri for which the contentType is being added. //This is to compensate for dead override entries in the content types file. DeleteContentType(partUri); //Lazy initializing - ensure that the override dictionary has been initialized EnsureOverrideDictionary(); // The part Uris are stored in the Override Dictionary in their original form , but they are compared // in a normalized manner using PartUriComparer. _overrideDictionary.Add(partUri, contentType); _dirty = true; } private void AddDefaultElement(string extension, ContentType contentType) { // The extensions are stored in the Default Dictionary in their original form , but they are compared // in a normalized manner using the ExtensionComparer. _defaultDictionary.Add(extension, contentType); _dirty = true; } private void WriteOverrideElement(XmlWriter xmlWriter, PackUriHelper.ValidatedPartUri partUri, ContentType contentType) { xmlWriter.WriteStartElement(OverrideTagName); xmlWriter.WriteAttributeString(PartNameAttributeName, partUri.PartUriString); xmlWriter.WriteAttributeString(ContentTypeAttributeName, contentType.ToString()); xmlWriter.WriteEndElement(); } private void WriteDefaultElement(XmlWriter xmlWriter, string extension, ContentType contentType) { xmlWriter.WriteStartElement(DefaultTagName); xmlWriter.WriteAttributeString(ExtensionAttributeName, extension); xmlWriter.WriteAttributeString(ContentTypeAttributeName, contentType.ToString()); xmlWriter.WriteEndElement(); } //Validate if the required XML attribute is present and not an empty string private void ValidateXmlAttribute(string attributeName, string? attributeValue, string tagName, XmlReader reader) { ThrowIfXmlAttributeMissing(attributeName, attributeValue, tagName, reader); //Checking for empty attribute if (attributeValue!.Length == 0) throw new XmlException(SR.Format(SR.RequiredAttributeEmpty, tagName, attributeName), null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); } //Validate if the required Content type XML attribute is present //Content type of a part can be empty private void ThrowIfXmlAttributeMissing(string attributeName, string? attributeValue, string tagName, XmlReader reader) { if (attributeValue == null) throw new XmlException(SR.Format(SR.RequiredAttributeMissing, tagName, attributeName), null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition); } private Dictionary<PackUriHelper.ValidatedPartUri, ContentType>? 
_overrideDictionary; private readonly Dictionary<string, ContentType> _defaultDictionary; private readonly ZipArchive _zipArchive; private readonly FileMode _packageFileMode; private readonly FileAccess _packageFileAccess; private readonly ZipStreamManager _zipStreamManager; private ZipArchiveEntry? _contentTypeZipArchiveEntry; private bool _contentTypeStreamExists; private bool _dirty; private CompressionLevel _cachedCompressionLevel; private const string ContentTypesFile = "[Content_Types].xml"; private const string ContentTypesFileUpperInvariant = "[CONTENT_TYPES].XML"; private const int DefaultDictionaryInitialSize = 16; private const int OverrideDictionaryInitialSize = 8; //Xml tag specific strings for the Content Type file private const string TypesNamespaceUri = "http://schemas.openxmlformats.org/package/2006/content-types"; private const string TypesTagName = "Types"; private const string DefaultTagName = "Default"; private const string ExtensionAttributeName = "Extension"; private const string ContentTypeAttributeName = "ContentType"; private const string OverrideTagName = "Override"; private const string PartNameAttributeName = "PartName"; private const string TemporaryPartNameWithoutExtension = "/tempfiles/sample."; } } }
-1
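The ContentTypeHelper code in the row above round-trips default and override mappings through the package's [Content_Types].xml stream. As a self-contained sketch of the element and attribute shape it reads and writes (the output file name, extension, part name and content types here are placeholders chosen for illustration, not values taken from that code):

using System.Text;
using System.Xml;

class ContentTypesSketch
{
    // Emits the same element/attribute shape that ContentTypeHelper.SaveToFile writes:
    // a <Types> root with <Default Extension=.. ContentType=..> and
    // <Override PartName=.. ContentType=..> children.
    static void Main()
    {
        const string ns = "http://schemas.openxmlformats.org/package/2006/content-types";

        var settings = new XmlWriterSettings { Encoding = Encoding.UTF8, Indent = true };
        using XmlWriter writer = XmlWriter.Create("[Content_Types].xml", settings);

        writer.WriteStartDocument();
        writer.WriteStartElement("Types", ns);

        // One default mapping: every ".xml" part is application/xml unless overridden.
        writer.WriteStartElement("Default", ns);
        writer.WriteAttributeString("Extension", "xml");
        writer.WriteAttributeString("ContentType", "application/xml");
        writer.WriteEndElement();

        // One per-part override (part name and content type are made up for illustration).
        writer.WriteStartElement("Override", ns);
        writer.WriteAttributeString("PartName", "/docProps/core.xml");
        writer.WriteAttributeString("ContentType",
            "application/vnd.openxmlformats-package.core-properties+xml");
        writer.WriteEndElement();

        writer.WriteEndElement();   // </Types>
        writer.WriteEndDocument();
    }
}

The result is a Types root in the OPC content-types namespace with one Default and one Override child, which is the structure ParseContentTypesFile expects when it reads the stream back.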
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
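A quick way to see what "atomic" buys in the description above is the pattern-level atomic group, which discards backtracking positions once it has matched. This is only a sketch of the concept using the public Regex API, not code from the PR:

using System;
using System.Text.RegularExpressions;

class AtomicSketch
{
    static void Main()
    {
        // "a+b" can backtrack: after a+ greedily eats all the a's, the engine
        // gives characters back one at a time while looking for 'b'.
        Console.WriteLine(Regex.IsMatch("aaab", "^a+b$"));      // True

        // "(?>a+)b" is atomic: once a+ has matched, no positions are kept,
        // so there is nothing to give back. The overall result is the same here...
        Console.WriteLine(Regex.IsMatch("aaab", "^(?>a+)b$"));  // True

        // ...but atomicity matters when giving back would have changed the outcome:
        // the atomic a+ consumes the final 'a' and cannot return it for the trailing "a$".
        Console.WriteLine(Regex.IsMatch("aaa", "^(?>a+)a$"));   // False
        Console.WriteLine(Regex.IsMatch("aaa", "^a+a$"));       // True
    }
}

The last two lines are where atomicity changes the outcome: the backtracking form can hand one 'a' back, the atomic form cannot, which is exactly the backtracking work the node-tree optimization tries to avoid generating.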
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/tests/JIT/IL_Conformance/Old/Base/add.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } //----------------------------------------------------- // ADD //----------------------------------------------------- // Smoke level tests of the ADD instruction //----------------------------------------------------- // Notes: // Will we ever be able to _add R4's _and R8's, I4's _and I8's? //----------------------------------------------------- .assembly 'add'{ //This byte field requests that this assembly not be verified at run time and corresponds to this C# declaration: //[assembly:System.Security.Permissions.SecurityPermissionAttribute( [mscorlib]System.Security.Permissions.SecurityAction.RequestMinimum, Flags=System.Security.Permissions.SecurityPermissionFlag.SkipVerification )] } .class public explicit _add { .field [0] int32 global0 .field [4] int32 global1 .method public void .ctor() { ret } //------------------------- // Entry point - Main - //------------------------- .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 6 .locals init (class _add target) //---------------- // begin testing - //---------------- // creating new instance of _add and storing it newobj instance void _add::.ctor() stloc.0 // -- I4 + I2 -- Make sure we can _add smaller types to larger types ldc.i4 0xAABB conv.i2 ldc.i4 0x1111 add ldc.i4 0xFFFFBBCC ceq brfalse FAIL // -- I4 + I4 -- ldc.i4 0x11223344 ldc.i4 0x11111111 add ldc.i4 0x22334455 ceq brfalse FAIL // -- I4 + & -- Managed ptr // -- I4 + * -- Unmanaged ptr ldloc target ldflda int32 _add::global0 ldc.i4 0xAAAAAAAA stind.i4 ldloc target ldflda int32 _add::global1 ldc.i4 0x44444444 stind.i4 ldloc target ldflda int32 _add::global0 ldc.i4 0x4 add ldind.i4 ldc.i4 0x44444444 ceq brfalse FAIL // -- I8 + I8 -- ldc.i8 0x0011223344556677 ldc.i8 0x7766554433221100 add ldc.i8 0x7777777777777777 ceq brfalse FAIL // -- R4 + R4 -- ldc.r4 float32(0x3F800000) ldc.r4 float32(0x00000000) add ldc.r4 float32(0x3F800000) ceq brfalse FAIL // -- R8 + R8 -- ldc.r8 float64(0xBFF0000000000000) ldc.r8 float64(0x0000000000000000) add ldc.r8 float64(0xBFF0000000000000) ceq brfalse FAIL //---------------- PASS: ldstr "Test SUCCESS" call void [System.Console]System.Console::WriteLine(string) ldc.i4 0x64 ret //---------------- FAIL: ldstr "Test FAILED" call void [System.Console]System.Console::WriteLine(string) ldc.i4 0x0 ret } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } //----------------------------------------------------- // ADD //----------------------------------------------------- // Smoke level tests of the ADD instruction //----------------------------------------------------- // Notes: // Will we ever be able to _add R4's _and R8's, I4's _and I8's? //----------------------------------------------------- .assembly 'add'{ //This byte field requests that this assembly not be verified at run time and corresponds to this C# declaration: //[assembly:System.Security.Permissions.SecurityPermissionAttribute( [mscorlib]System.Security.Permissions.SecurityAction.RequestMinimum, Flags=System.Security.Permissions.SecurityPermissionFlag.SkipVerification )] } .class public explicit _add { .field [0] int32 global0 .field [4] int32 global1 .method public void .ctor() { ret } //------------------------- // Entry point - Main - //------------------------- .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 6 .locals init (class _add target) //---------------- // begin testing - //---------------- // creating new instance of _add and storing it newobj instance void _add::.ctor() stloc.0 // -- I4 + I2 -- Make sure we can _add smaller types to larger types ldc.i4 0xAABB conv.i2 ldc.i4 0x1111 add ldc.i4 0xFFFFBBCC ceq brfalse FAIL // -- I4 + I4 -- ldc.i4 0x11223344 ldc.i4 0x11111111 add ldc.i4 0x22334455 ceq brfalse FAIL // -- I4 + & -- Managed ptr // -- I4 + * -- Unmanaged ptr ldloc target ldflda int32 _add::global0 ldc.i4 0xAAAAAAAA stind.i4 ldloc target ldflda int32 _add::global1 ldc.i4 0x44444444 stind.i4 ldloc target ldflda int32 _add::global0 ldc.i4 0x4 add ldind.i4 ldc.i4 0x44444444 ceq brfalse FAIL // -- I8 + I8 -- ldc.i8 0x0011223344556677 ldc.i8 0x7766554433221100 add ldc.i8 0x7777777777777777 ceq brfalse FAIL // -- R4 + R4 -- ldc.r4 float32(0x3F800000) ldc.r4 float32(0x00000000) add ldc.r4 float32(0x3F800000) ceq brfalse FAIL // -- R8 + R8 -- ldc.r8 float64(0xBFF0000000000000) ldc.r8 float64(0x0000000000000000) add ldc.r8 float64(0xBFF0000000000000) ceq brfalse FAIL //---------------- PASS: ldstr "Test SUCCESS" call void [System.Console]System.Console::WriteLine(string) ldc.i4 0x64 ret //---------------- FAIL: ldstr "Test FAILED" call void [System.Console]System.Console::WriteLine(string) ldc.i4 0x0 ret } }
-1
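The IL in this row smoke-tests the add instruction over I4, I8, R4 and R8 operands. For readers less comfortable with raw IL, a loose C# analogue of a few of those checks follows; it is illustrative only and not part of the test suite:

using System;

class AddSketch
{
    static int Main()
    {
        // I4 + I2: conv.i2 sign-extends 0xAABB to -21829, so the sum is 0xFFFFBBCC.
        int i2Sum = unchecked((short)0xAABB) + 0x1111;
        if (i2Sum != unchecked((int)0xFFFFBBCC)) return 0;

        // I4 + I4
        if (0x11223344 + 0x11111111 != 0x22334455) return 0;

        // I8 + I8
        if (0x0011223344556677L + 0x7766554433221100L != 0x7777777777777777L) return 0;

        // R8 + R8: adding positive zero leaves the value unchanged.
        if (-1.0 + 0.0 != -1.0) return 0;

        Console.WriteLine("Test SUCCESS");
        return 100;
    }
}

Like the IL, the sketch returns 100 on success and a non-100 value on failure.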
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/tests/GC/Features/KeepAlive/keepaliveother/keepalivescope.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

// Tests KeepAlive() scopes

using System;

public class Test_keepalivescope
{
    public static int returnValue = 0;

    public class Dummy
    {
        public static bool visited;

        ~Dummy()
        {
            //Console.WriteLine("In Finalize() of Dummy");
            visited=true;
        }
    }

    public class CreateObj
    {
        public Dummy obj;
        public bool result;

        public CreateObj()
        {
            obj = new Dummy();
            result=false;
        }

        public void RunTest()
        {
            GC.Collect();
            GC.WaitForPendingFinalizers();

            if ((Dummy.visited == false))
            {
                // has not visited the Finalize() yet
                result=true;
            }

            GC.KeepAlive(obj);  // will keep alive 'obj' till this point

            obj=null;
            GC.Collect();
            GC.WaitForPendingFinalizers();

            if (result==true && Dummy.visited==true)
                returnValue = 100;
            else
                returnValue = 1;
        }
    }

    public static int Main()
    {
        CreateObj temp = new CreateObj();
        temp.RunTest();

        if (returnValue == 100)
            Console.WriteLine("Test passed!");
        else
            Console.WriteLine("Test failed!");

        return returnValue;
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

// Tests KeepAlive() scopes

using System;

public class Test_keepalivescope
{
    public static int returnValue = 0;

    public class Dummy
    {
        public static bool visited;

        ~Dummy()
        {
            //Console.WriteLine("In Finalize() of Dummy");
            visited=true;
        }
    }

    public class CreateObj
    {
        public Dummy obj;
        public bool result;

        public CreateObj()
        {
            obj = new Dummy();
            result=false;
        }

        public void RunTest()
        {
            GC.Collect();
            GC.WaitForPendingFinalizers();

            if ((Dummy.visited == false))
            {
                // has not visited the Finalize() yet
                result=true;
            }

            GC.KeepAlive(obj);  // will keep alive 'obj' till this point

            obj=null;
            GC.Collect();
            GC.WaitForPendingFinalizers();

            if (result==true && Dummy.visited==true)
                returnValue = 100;
            else
                returnValue = 1;
        }
    }

    public static int Main()
    {
        CreateObj temp = new CreateObj();
        temp.RunTest();

        if (returnValue == 100)
            Console.WriteLine("Test passed!");
        else
            Console.WriteLine("Test failed!");

        return returnValue;
    }
}
-1
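The keepalivescope.cs test above verifies that GC.KeepAlive(obj) keeps the object alive only up to the call site. The classic hazard it exists to prevent looks roughly like the sketch below; the NativeBuffer type and the Marshal calls are stand-ins chosen for illustration, not anything taken from the test:

using System;
using System.Runtime.InteropServices;

class NativeBuffer
{
    public IntPtr Handle = Marshal.AllocHGlobal(16);

    // The finalizer frees the native memory; it may run as soon as the GC decides
    // no managed code can reach this object again.
    ~NativeBuffer() => Marshal.FreeHGlobal(Handle);
}

class KeepAliveSketch
{
    static void Main()
    {
        var buffer = new NativeBuffer();
        IntPtr raw = buffer.Handle;

        // After this point 'buffer' is never read again, so without help the JIT/GC
        // may treat it as dead even though 'raw' still points into its native memory.
        Marshal.WriteInt32(raw, 42);
        Console.WriteLine(Marshal.ReadInt32(raw));

        // Extends the reachable lifetime of 'buffer' to here, so the finalizer
        // cannot free the memory while the raw pointer above is still in use.
        GC.KeepAlive(buffer);
    }
}

Without the final GC.KeepAlive call, a collection triggered between the two Marshal calls could run the finalizer and free the memory that raw still points to.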
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/System.Text.Json/tests/Common/CollectionTests/CollectionTests.Generic.Read.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Linq; using System.Threading.Tasks; using Xunit; namespace System.Text.Json.Serialization.Tests { public abstract partial class CollectionTests { [Fact] public async Task ReadListOfList() { List<List<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<List<List<int>>>(@"[[1,2],[3,4]]"); Assert.Equal(1, result[0][0]); Assert.Equal(2, result[0][1]); Assert.Equal(3, result[1][0]); Assert.Equal(4, result[1][1]); GenericListWrapper<StringListWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericListWrapper<StringListWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); Assert.Equal("1", result2[0][0]); Assert.Equal("2", result2[0][1]); Assert.Equal("3", result2[1][0]); Assert.Equal("4", result2[1][1]); } [Fact] public async Task ReadListOfArray() { List<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<List<int[]>>(@"[[1,2],[3,4]]"); Assert.Equal(1, result[0][0]); Assert.Equal(2, result[0][1]); Assert.Equal(3, result[1][0]); Assert.Equal(4, result[1][1]); GenericListWrapper<string[]> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericListWrapper<string[]>>(@"[[""1"",""2""],[""3"",""4""]]"); Assert.Equal("1", result2[0][0]); Assert.Equal("2", result2[0][1]); Assert.Equal("3", result2[1][0]); Assert.Equal("4", result2[1][1]); } [Fact] public async Task ReadArrayOfList() { List<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<List<int>[]>(@"[[1,2],[3,4]]"); Assert.Equal(1, result[0][0]); Assert.Equal(2, result[0][1]); Assert.Equal(3, result[1][0]); Assert.Equal(4, result[1][1]); StringListWrapper[] result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringListWrapper[]>(@"[[""1"",""2""],[""3"",""4""]]"); Assert.Equal("1", result2[0][0]); Assert.Equal("2", result2[0][1]); Assert.Equal("3", result2[1][0]); Assert.Equal("4", result2[1][1]); } [Fact] public async Task ReadSimpleList() { List<int> i = await JsonSerializerWrapperForString.DeserializeWrapper<List<int>>(@"[1,2]"); Assert.Equal(1, i[0]); Assert.Equal(2, i[1]); i = await JsonSerializerWrapperForString.DeserializeWrapper<List<int>>(@"[]"); Assert.Equal(0, i.Count); StringListWrapper i2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringListWrapper>(@"[""1"",""2""]"); Assert.Equal("1", i2[0]); Assert.Equal("2", i2[1]); i2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringListWrapper>(@"[]"); Assert.Equal(0, i2.Count); } [Fact] public async Task ReadGenericIEnumerableOfGenericIEnumerable() { IEnumerable<IEnumerable<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<IEnumerable<IEnumerable<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IEnumerable<int> ie in result) { foreach (int i in ie) { Assert.Equal(expected++, i); } } // No way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<GenericIEnumerableWrapper<StringIEnumerableWrapper>>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadIEnumerableTOfArray() { IEnumerable<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<IEnumerable<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } // No way to populate this collection. 
await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<GenericIEnumerableWrapper<int[]>>(@"[[1,2],[3, 4]]")); } [Fact] public async Task ReadArrayOfIEnumerableT() { IEnumerable<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<IEnumerable<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IEnumerable<int> arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } // No way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<StringIEnumerableWrapper[]>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadSimpleGenericIEnumerable() { IEnumerable<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<IEnumerable<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<IEnumerable<int>>(@"[]"); Assert.Equal(0, result.Count()); // There is no way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<StringIEnumerableWrapper>(@"[""1"",""2""]")); await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<StringIEnumerableWrapper>(@"[]")); } [Fact] public async Task ReadIListTOfIListT() { IList<IList<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<IList<IList<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IList<int> ie in result) { foreach (int i in ie) { Assert.Equal(expected++, i); } } GenericIListWrapper<StringIListWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericIListWrapper<StringIListWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (StringIListWrapper il in result2) { foreach (string str in il) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadGenericIListOfArray() { IList<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<IList<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } GenericIListWrapper<string[]> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericIListWrapper<string[]>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (string[] arr in result2) { foreach (string str in arr) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadArrayOfIListT() { IList<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<IList<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IList<int> arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } StringIListWrapper[] result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringIListWrapper[]>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (StringIListWrapper il in result2) { foreach (string str in il) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadSimpleGenericIList() { IList<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<IList<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<IList<int>>(@"[]"); Assert.Equal(0, result.Count()); StringIListWrapper result2 = await 
JsonSerializerWrapperForString.DeserializeWrapper<StringIListWrapper>(@"[""1"",""2""]"); expected = 1; foreach (string str in result2) { Assert.Equal($"{expected++}", str); } result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringIListWrapper>(@"[]"); Assert.Equal(0, result2.Count()); } [Fact] public async Task ReadGenericStructIList() { string json = "[10,20,30]"; var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructIListWrapper<int>>(json); Assert.Equal(3, wrapper.Count); Assert.Equal(10, wrapper[0]); Assert.Equal(20, wrapper[1]); Assert.Equal(30, wrapper[2]); } [Fact] public async Task ReadNullableGenericStructIList() { string json = "[10,20,30]"; var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructIListWrapper<int>?>(json); Assert.True(wrapper.HasValue); Assert.Equal(3, wrapper.Value.Count); Assert.Equal(10, wrapper.Value[0]); Assert.Equal(20, wrapper.Value[1]); Assert.Equal(30, wrapper.Value[2]); } [Fact] public async Task ReadNullableGenericStructIListWithNullJson() { var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructIListWrapper<int>?>("null"); Assert.False(wrapper.HasValue); } [Fact] public async Task ReadGenericStructICollection() { string json = "[10,20,30]"; var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructICollectionWrapper<int>>(json); Assert.Equal(3, wrapper.Count); Assert.Equal(10, wrapper.ElementAt(0)); Assert.Equal(20, wrapper.ElementAt(1)); Assert.Equal(30, wrapper.ElementAt(2)); } [Fact] public async Task ReadNullableGenericStructICollection() { string json = "[10,20,30]"; var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructICollectionWrapper<int>?>(json); Assert.True(wrapper.HasValue); Assert.Equal(3, wrapper.Value.Count); Assert.Equal(10, wrapper.Value.ElementAt(0)); Assert.Equal(20, wrapper.Value.ElementAt(1)); Assert.Equal(30, wrapper.Value.ElementAt(2)); } [Fact] public async Task ReadNullableGenericStructICollectionWithNullJson() { var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructICollectionWrapper<int>?>("null"); Assert.False(wrapper.HasValue); } [Fact] public async Task ReadGenericICollectionOfGenericICollection() { ICollection<ICollection<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<ICollection<ICollection<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (ICollection<int> ie in result) { foreach (int i in ie) { Assert.Equal(expected++, i); } } GenericICollectionWrapper<GenericICollectionWrapper<string>> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericICollectionWrapper<GenericICollectionWrapper<string>>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (GenericICollectionWrapper<string> ic in result2) { foreach (string str in ic) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadGenericICollectionOfArray() { ICollection<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<ICollection<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } GenericICollectionWrapper<string[]> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericICollectionWrapper<string[]>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (string[] arr in result2) { foreach (string str in arr) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task 
ReadArrayOfGenericICollection() { ICollection<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<ICollection<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (ICollection<int> arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadSimpleGenericICollection() { ICollection<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<ICollection<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<ICollection<int>>(@"[]"); Assert.Equal(0, result.Count()); GenericICollectionWrapper<string> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericICollectionWrapper<string>>(@"[""1"",""2""]"); expected = 1; foreach (string str in result2) { Assert.Equal($"{expected++}", str); } result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericICollectionWrapper<string>>(@"[]"); Assert.Equal(0, result2.Count()); } [Fact] public async Task ReadGenericIReadOnlyCollectionOfGenericIReadOnlyCollection() { IReadOnlyCollection<IReadOnlyCollection<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyCollection<IReadOnlyCollection<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IReadOnlyCollection<int> ie in result) { foreach (int i in ie) { Assert.Equal(expected++, i); } } // There's no way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>( async () => await JsonSerializerWrapperForString.DeserializeWrapper<GenericIReadOnlyCollectionWrapper<WrapperForIReadOnlyCollectionOfT<string>>>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadGenericIReadOnlyCollectionOfArray() { IReadOnlyCollection<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyCollection<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<GenericIReadOnlyCollectionWrapper<int[]>>(@"[[1,2],[3,4]]")); } [Fact] public async Task ReadArrayOfIReadOnlyCollectionT() { IReadOnlyCollection<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyCollection<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IReadOnlyCollection<int> arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } // No way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<WrapperForIReadOnlyCollectionOfT<string>[]>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadGenericSimpleIReadOnlyCollection() { IReadOnlyCollection<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyCollection<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyCollection<int>>(@"[]"); Assert.Equal(0, result.Count()); // No way to populate this collection. 
await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<WrapperForIReadOnlyCollectionOfT<string>>(@"[""1"",""2""]")); } [Fact] public async Task ReadGenericIReadOnlyListOfGenericIReadOnlyList() { IReadOnlyList<IReadOnlyList<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyList<IReadOnlyList<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IReadOnlyList<int> ie in result) { foreach (int i in ie) { Assert.Equal(expected++, i); } } await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<GenericIReadOnlyListWrapper<StringIReadOnlyListWrapper>>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadGenericIReadOnlyListOfArray() { IReadOnlyList<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyList<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } // No way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<GenericIReadOnlyListWrapper<string[]>>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadArrayOfGenericIReadOnlyList() { IReadOnlyList<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyList<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IReadOnlyList<int> arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } // No way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<StringIReadOnlyListWrapper[]>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadSimpleGenericIReadOnlyList() { IReadOnlyList<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyList<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyList<int>>(@"[]"); Assert.Equal(0, result.Count()); // No way to populate this collection. 
await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<StringIReadOnlyListWrapper>(@"[""1"",""2""]")); } [Fact] public async Task ReadGenericISetOfGenericISet() { ISet<ISet<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<ISet<ISet<int>>>(@"[[1,2],[3,4]]"); if (result.First().Contains(1)) { Assert.Equal(new HashSet<int> { 1, 2 }, result.First()); Assert.Equal(new HashSet<int> { 3, 4 }, result.Last()); } else { Assert.Equal(new HashSet<int> { 3, 4 }, result.First()); Assert.Equal(new HashSet<int> { 1, 2 }, result.Last()); } GenericISetWrapper<StringISetWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericISetWrapper<StringISetWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); if (result2.First().Contains("1")) { Assert.Equal(new HashSet<string> { "1", "2" }, (ISet<string>)result2.First()); Assert.Equal(new HashSet<string> { "3", "4" }, (ISet<string>)result2.Last()); } else { Assert.Equal(new HashSet<string> { "3", "4" }, (ISet<string>)result.First()); Assert.Equal(new HashSet<string> { "1", "2" }, (ISet<string>)result.Last()); } } [Fact] public async Task ReadGenericStructISet() { string json = "[10, 20, 30]"; var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructISetWrapper<int>>(json); Assert.Equal(3, wrapper.Count); Assert.Equal(10, wrapper.ElementAt(0)); Assert.Equal(20, wrapper.ElementAt(1)); Assert.Equal(30, wrapper.ElementAt(2)); } [Fact] public async Task ReadNullableGenericStructISet() { string json = "[10, 20, 30]"; var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructISetWrapper<int>?>(json); Assert.True(wrapper.HasValue); Assert.Equal(3, wrapper.Value.Count); Assert.Equal(10, wrapper.Value.ElementAt(0)); Assert.Equal(20, wrapper.Value.ElementAt(1)); Assert.Equal(30, wrapper.Value.ElementAt(2)); } [Fact] public async Task ReadNullableGenericStructISetWithNullJson() { var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructISetWrapper<int>?>("null"); Assert.False(wrapper.HasValue); } [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/50721", typeof(PlatformDetection), nameof(PlatformDetection.IsBuiltWithAggressiveTrimming), nameof(PlatformDetection.IsBrowser))] public async Task ReadISetTOfHashSetT() { ISet<HashSet<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<ISet<HashSet<int>>>(@"[[1,2],[3,4]]"); if (result.First().Contains(1)) { Assert.Equal(new HashSet<int> { 1, 2 }, result.First()); Assert.Equal(new HashSet<int> { 3, 4 }, result.Last()); } else { Assert.Equal(new HashSet<int> { 3, 4 }, result.First()); Assert.Equal(new HashSet<int> { 1, 2 }, result.Last()); } } [Fact] public async Task ReadHashSetTOfISetT() { HashSet<ISet<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<HashSet<ISet<int>>>(@"[[1,2],[3,4]]"); if (result.First().Contains(1)) { Assert.Equal(new HashSet<int> { 1, 2 }, result.First()); Assert.Equal(new HashSet<int> { 3, 4 }, result.Last()); } else { Assert.Equal(new HashSet<int> { 3, 4 }, result.First()); Assert.Equal(new HashSet<int> { 1, 2 }, result.Last()); } } [Fact] public async Task ReadISetTOfArray() { ISet<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<ISet<int[]>>(@"[[1,2],[3,4]]"); if (result.First().Contains(1)) { Assert.Equal(new HashSet<int> { 1, 2 }, result.First()); Assert.Equal(new HashSet<int> { 3, 4 }, result.Last()); } else { Assert.Equal(new HashSet<int> { 3, 4 }, 
result.First()); Assert.Equal(new HashSet<int> { 1, 2 }, result.Last()); } } [Fact] public async Task ReadArrayOfISetT() { ISet<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<ISet<int>[]>(@"[[1,2],[3,4]]"); Assert.Equal(new HashSet<int> { 1, 2 }, result.First()); Assert.Equal(new HashSet<int> { 3, 4 }, result.Last()); } [Fact] public async Task ReadSimpleISetT() { ISet<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<ISet<int>>(@"[1,2]"); Assert.Equal(new HashSet<int> { 1, 2 }, result); result = await JsonSerializerWrapperForString.DeserializeWrapper<ISet<int>>(@"[]"); Assert.Equal(0, result.Count()); } [Fact] public async Task StackTOfStackT() { Stack<Stack<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<Stack<Stack<int>>>(@"[[1,2],[3,4]]"); int expected = 4; foreach (Stack<int> st in result) { foreach (int i in st) { Assert.Equal(expected--, i); } } GenericStackWrapper<StringStackWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStackWrapper<StringStackWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 4; foreach (StringStackWrapper st in result2) { foreach (string str in st) { Assert.Equal($"{expected--}", str); } } } [Fact] public async Task ReadGenericStackOfArray() { Stack<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<Stack<int[]>>(@"[[1,2],[3,4]]"); int expected = 3; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } expected = 1; } GenericStackWrapper<string[]> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStackWrapper<string[]>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 3; foreach (string[] arr in result2) { foreach (string str in arr) { Assert.Equal($"{expected++}", str); } expected = 1; } } [Fact] public async Task ReadArrayOfGenericStack() { Stack<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<Stack<int>[]>(@"[[1,2],[3,4]]"); int expected = 2; foreach (Stack<int> st in result) { foreach (int i in st) { Assert.Equal(expected--, i); } expected = 4; } StringStackWrapper[] result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringStackWrapper[]>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 2; foreach (StringStackWrapper st in result2) { foreach (string str in st) { Assert.Equal($"{expected--}", str); } expected = 4; } } [Fact] public async Task ReadSimpleGenericStack() { Stack<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<Stack<int>>(@"[1,2]"); int expected = 2; foreach (int i in result) { Assert.Equal(expected--, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<Stack<int>>(@"[]"); Assert.Equal(0, result.Count()); StringStackWrapper result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringStackWrapper>(@"[""1"",""2""]"); expected = 2; foreach (string str in result2) { Assert.Equal($"{expected--}", str); } result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringStackWrapper>(@"[]"); Assert.Equal(0, result2.Count()); } [Fact] public async Task ReadQueueTOfQueueT() { Queue<Queue<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<Queue<Queue<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (Queue<int> q in result) { foreach (int i in q) { Assert.Equal(expected++, i); } } GenericQueueWrapper<StringQueueWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericQueueWrapper<StringQueueWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); 
expected = 1; foreach (StringQueueWrapper q in result2) { foreach (string str in q) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadQueueTOfArray() { Queue<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<Queue<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadArrayOfIQueueT() { Queue<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<Queue<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (Queue<int> q in result) { foreach (int i in q) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadSimpleQueueT() { Queue<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<Queue<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<Queue<int>>(@"[]"); Assert.Equal(0, result.Count()); } [Fact] public async Task ReadHashSetTOfHashSetT() { HashSet<HashSet<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<HashSet<HashSet<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (HashSet<int> hs in result) { foreach (int i in hs) { Assert.Equal(expected++, i); } } GenericHashSetWrapper<StringHashSetWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericHashSetWrapper<StringHashSetWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (StringHashSetWrapper hs in result2) { foreach (string str in hs) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadHashSetTOfArray() { HashSet<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<HashSet<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadArrayOfIHashSetT() { HashSet<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<HashSet<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (HashSet<int> hs in result) { foreach (int i in hs) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadSimpleHashSetT() { HashSet<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<HashSet<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<HashSet<int>>(@"[]"); Assert.Equal(0, result.Count()); } [Fact] public async Task ReadGenericLinkedListOfGenericLinkedList() { LinkedList<LinkedList<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<LinkedList<LinkedList<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (LinkedList<int> l in result) { foreach (int i in l) { Assert.Equal(expected++, i); } } GenericLinkedListWrapper<StringLinkedListWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericLinkedListWrapper<StringLinkedListWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (StringLinkedListWrapper l in result2) { foreach (string str in l) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadLinkedListTOfArray() { LinkedList<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<LinkedList<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadArrayOfILinkedListT() { LinkedList<int>[] result = 
await JsonSerializerWrapperForString.DeserializeWrapper<LinkedList<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (LinkedList<int> l in result) { foreach (int i in l) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadSimpleLinkedListT() { LinkedList<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<LinkedList<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<LinkedList<int>>(@"[]"); Assert.Equal(0, result.Count()); } [Fact] public async Task ReadArrayOfSortedSetT() { SortedSet<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<SortedSet<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (SortedSet<int> s in result) { foreach (int i in s) { Assert.Equal(expected++, i); } } StringSortedSetWrapper[] result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringSortedSetWrapper[]>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (StringSortedSetWrapper s in result2) { foreach (string str in s) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadSimpleSortedSetT() { SortedSet<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<SortedSet<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<SortedSet<int>>(@"[]"); Assert.Equal(0, result.Count()); } [Fact] public async Task ReadClass_WithGenericStructCollectionWrapper_NullJson_Throws() { await Assert.ThrowsAsync<JsonException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<ClassWithGenericStructIListWrapper>(@"{ ""List"": null }")); await Assert.ThrowsAsync<JsonException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<ClassWithGenericStructICollectionWrapper>(@"{ ""Collection"": null }")); await Assert.ThrowsAsync<JsonException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<ClassWithGenericStructIDictionaryWrapper>(@"{ ""Dictionary"": null }")); await Assert.ThrowsAsync<JsonException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<ClassWithGenericStructISetWrapper>(@"{ ""Set"": null }")); } [Fact] public async Task ReadSimpleTestClass_GenericStructCollectionWrappers() { SimpleTestClassWithGenericStructCollectionWrappers obj = await JsonSerializerWrapperForString.DeserializeWrapper<SimpleTestClassWithGenericStructCollectionWrappers>(SimpleTestClassWithGenericStructCollectionWrappers.s_json); obj.Verify(); } [Fact] public async Task ReadSimpleTestStruct_NullableGenericStructCollectionWrappers() { { SimpleTestStructWithNullableGenericStructCollectionWrappers obj = await JsonSerializerWrapperForString.DeserializeWrapper<SimpleTestStructWithNullableGenericStructCollectionWrappers>(SimpleTestStructWithNullableGenericStructCollectionWrappers.s_json); obj.Verify(); } { string json = @"{" + @"""List"" : null," + @"""Collection"" : null," + @"""Set"" : null," + @"""Dictionary"" : null" + @"}"; SimpleTestStructWithNullableGenericStructCollectionWrappers obj = await JsonSerializerWrapperForString.DeserializeWrapper<SimpleTestStructWithNullableGenericStructCollectionWrappers>(json); Assert.False(obj.List.HasValue); Assert.False(obj.Collection.HasValue); Assert.False(obj.Set.HasValue); Assert.False(obj.Dictionary.HasValue); } } [Fact] public async Task ReadSimpleTestClass_GenericCollectionWrappers() { SimpleTestClassWithGenericCollectionWrappers 
obj = await JsonSerializerWrapperForString.DeserializeWrapper<SimpleTestClassWithGenericCollectionWrappers>(SimpleTestClassWithGenericCollectionWrappers.s_json); obj.Verify(); } [Theory] [MemberData(nameof(ReadSimpleTestClass_GenericWrappers_NoAddMethod))] public async Task ReadSimpleTestClass_GenericWrappers_NoAddMethod_Throws(Type type, string json, Type exceptionMessageType) { NotSupportedException ex = await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper(json, type)); Assert.Contains(exceptionMessageType.ToString(), ex.Message); } public static IEnumerable<object[]> ReadSimpleTestClass_GenericWrappers_NoAddMethod { get { yield return new object[] { typeof(SimpleTestClassWithStringIEnumerableWrapper), SimpleTestClassWithStringIEnumerableWrapper.s_json, typeof(StringIEnumerableWrapper) }; yield return new object[] { typeof(SimpleTestClassWithStringIReadOnlyCollectionWrapper), SimpleTestClassWithStringIReadOnlyCollectionWrapper.s_json, typeof(WrapperForIReadOnlyCollectionOfT<string>) }; yield return new object[] { typeof(SimpleTestClassWithStringIReadOnlyListWrapper), SimpleTestClassWithStringIReadOnlyListWrapper.s_json, typeof(StringIReadOnlyListWrapper) }; yield return new object[] { typeof(SimpleTestClassWithStringToStringIReadOnlyDictionaryWrapper), SimpleTestClassWithStringToStringIReadOnlyDictionaryWrapper.s_json, typeof(GenericIReadOnlyDictionaryWrapper<string, string>) }; } } [Theory] [InlineData(typeof(ReadOnlyWrapperForIList), @"[""1"", ""2""]")] [InlineData(typeof(ReadOnlyStringIListWrapper), @"[""1"", ""2""]")] [InlineData(typeof(ReadOnlyStringICollectionWrapper), @"[""1"", ""2""]")] [InlineData(typeof(ReadOnlyStringISetWrapper), @"[""1"", ""2""]")] [InlineData(typeof(ReadOnlyWrapperForIDictionary), @"{""Key"":""key"",""Value"":""value""}")] [InlineData(typeof(ReadOnlyStringToStringIDictionaryWrapper), @"{""Key"":""key"",""Value"":""value""}")] public async Task ReadReadOnlyCollections_Throws(Type type, string json) { NotSupportedException ex = await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper(json, type)); Assert.Contains(type.ToString(), ex.Message); } [Fact] public async Task Read_HigherOrderCollectionInheritance_Works() { const string json = "[\"test\"]"; Assert.Equal("test", (await JsonSerializerWrapperForString.DeserializeWrapper<string[]>(json))[0]); Assert.Equal("test", (await JsonSerializerWrapperForString.DeserializeWrapper<List<string>>(json)).First()); Assert.Equal("test", (await JsonSerializerWrapperForString.DeserializeWrapper<StringListWrapper>(json)).First()); Assert.Equal("test", (await JsonSerializerWrapperForString.DeserializeWrapper<GenericListWrapper<string>>(json)).First()); Assert.Equal("test", (await JsonSerializerWrapperForString.DeserializeWrapper<MyMyList<string>>(json)).First()); Assert.Equal("test", (await JsonSerializerWrapperForString.DeserializeWrapper<MyListString>(json)).First()); } [Theory] [InlineData(typeof(GenericIEnumerableWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericIEnumerableWrapperInternalConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericICollectionWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericICollectionWrapperInternalConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericIListWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericIListWrapperInternalConstructor<string>), @"[""1""]")] 
[InlineData(typeof(GenericISetWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericISetWrapperInternalConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericIDictionaryWrapperPrivateConstructor<string, string>), @"{""Key"":""Value""}")] [InlineData(typeof(GenericIDictionaryWrapperInternalConstructor<string, string>), @"{""Key"":""Value""}")] [InlineData(typeof(StringToStringIReadOnlyDictionaryWrapperPrivateConstructor), @"{""Key"":""Value""}")] [InlineData(typeof(StringToStringIReadOnlyDictionaryWrapperInternalConstructor), @"{""Key"":""Value""}")] [InlineData(typeof(GenericListWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericListWrapperInternalConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericQueueWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericQueueWrapperInternalConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericStackWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericStackWrapperInternalConstructor<string>), @"[""1""]")] [InlineData(typeof(StringToGenericDictionaryWrapperPrivateConstructor<string>), @"{""Key"":""Value""}")] [InlineData(typeof(StringToGenericDictionaryWrapperInternalConstructor<string>), @"{""Key"":""Value""}")] public async Task Read_Generic_NoPublicConstructor_Throws(Type type, string json) { NotSupportedException ex = await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper(json, type)); Assert.Contains(type.ToString(), ex.Message); } [Fact] public async Task DoesNotCall_CollectionPropertyGetter_EveryTimeElementIsAdded() { var networkList = new List<string> { "Network1", "Network2" }; string serialized = await JsonSerializerWrapperForString.SerializeWrapper(new NetworkWrapper { NetworkList = networkList }); Assert.Equal(@"{""NetworkList"":[""Network1"",""Network2""]}", serialized); NetworkWrapper obj = await JsonSerializerWrapperForString.DeserializeWrapper<NetworkWrapper>(serialized); int i = 0; foreach (string network in obj.NetworkList) { Assert.Equal(networkList[i], network); i++; } } public class NetworkWrapper { private string _Networks = string.Empty; [JsonIgnore] public string Networks { get => _Networks; set => _Networks = value ?? string.Empty; } public IEnumerable<string> NetworkList { get => Networks.Split(','); set => Networks = value != null ? 
string.Join(",", value) : ""; } } [Fact] public async Task CollectionWith_BackingField_CanRoundtrip() { string json = "{\"AllowedGrantTypes\":[\"client_credentials\"]}"; Client obj = await JsonSerializerWrapperForString.DeserializeWrapper<Client>(json); Assert.Equal("client_credentials", obj.AllowedGrantTypes.First()); string serialized = await JsonSerializerWrapperForString.SerializeWrapper(obj); Assert.Equal(json, serialized); } public class Client { private ICollection<string> _allowedGrantTypes = new HashSetWithBackingCollection(); public ICollection<string> AllowedGrantTypes { get { return _allowedGrantTypes; } set { _allowedGrantTypes = new HashSetWithBackingCollection(value); } } } [Theory] [MemberData(nameof(CustomInterfaces_Enumerables))] public async Task CustomInterfacesNotSupported_Enumerables(Type type) { NotSupportedException ex = await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper("[]", type)); Assert.Contains(type.ToString(), ex.ToString()); } [Theory] [MemberData(nameof(CustomInterfaces_Dictionaries))] public async Task CustomInterfacesNotSupported_Dictionaries(Type type) { NotSupportedException ex = await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper("{}", type)); Assert.Contains(type.ToString(), ex.ToString()); } public static IEnumerable<object[]> CustomInterfaces_Enumerables() { yield return new object[] { typeof(IDerivedICollectionOfT<string>) }; yield return new object[] { typeof(IDerivedIList) }; yield return new object[] { typeof(IDerivedISetOfT<string>) }; } public static IEnumerable<object[]> CustomInterfaces_Dictionaries() { yield return new object[] { typeof(IDerivedIDictionaryOfTKeyTValue<string, string>) }; } [Fact] public async Task IReadOnlyDictionary_NotSupportedKey() { await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyDictionary<Uri, int>>(@"{""http://foo"":1}")); await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.SerializeWrapper(new GenericIReadOnlyDictionaryWrapper<Uri, int>(new Dictionary<Uri, int> { { new Uri("http://foo"), 1 } }))); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Linq; using System.Threading.Tasks; using Xunit; namespace System.Text.Json.Serialization.Tests { public abstract partial class CollectionTests { [Fact] public async Task ReadListOfList() { List<List<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<List<List<int>>>(@"[[1,2],[3,4]]"); Assert.Equal(1, result[0][0]); Assert.Equal(2, result[0][1]); Assert.Equal(3, result[1][0]); Assert.Equal(4, result[1][1]); GenericListWrapper<StringListWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericListWrapper<StringListWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); Assert.Equal("1", result2[0][0]); Assert.Equal("2", result2[0][1]); Assert.Equal("3", result2[1][0]); Assert.Equal("4", result2[1][1]); } [Fact] public async Task ReadListOfArray() { List<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<List<int[]>>(@"[[1,2],[3,4]]"); Assert.Equal(1, result[0][0]); Assert.Equal(2, result[0][1]); Assert.Equal(3, result[1][0]); Assert.Equal(4, result[1][1]); GenericListWrapper<string[]> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericListWrapper<string[]>>(@"[[""1"",""2""],[""3"",""4""]]"); Assert.Equal("1", result2[0][0]); Assert.Equal("2", result2[0][1]); Assert.Equal("3", result2[1][0]); Assert.Equal("4", result2[1][1]); } [Fact] public async Task ReadArrayOfList() { List<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<List<int>[]>(@"[[1,2],[3,4]]"); Assert.Equal(1, result[0][0]); Assert.Equal(2, result[0][1]); Assert.Equal(3, result[1][0]); Assert.Equal(4, result[1][1]); StringListWrapper[] result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringListWrapper[]>(@"[[""1"",""2""],[""3"",""4""]]"); Assert.Equal("1", result2[0][0]); Assert.Equal("2", result2[0][1]); Assert.Equal("3", result2[1][0]); Assert.Equal("4", result2[1][1]); } [Fact] public async Task ReadSimpleList() { List<int> i = await JsonSerializerWrapperForString.DeserializeWrapper<List<int>>(@"[1,2]"); Assert.Equal(1, i[0]); Assert.Equal(2, i[1]); i = await JsonSerializerWrapperForString.DeserializeWrapper<List<int>>(@"[]"); Assert.Equal(0, i.Count); StringListWrapper i2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringListWrapper>(@"[""1"",""2""]"); Assert.Equal("1", i2[0]); Assert.Equal("2", i2[1]); i2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringListWrapper>(@"[]"); Assert.Equal(0, i2.Count); } [Fact] public async Task ReadGenericIEnumerableOfGenericIEnumerable() { IEnumerable<IEnumerable<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<IEnumerable<IEnumerable<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IEnumerable<int> ie in result) { foreach (int i in ie) { Assert.Equal(expected++, i); } } // No way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<GenericIEnumerableWrapper<StringIEnumerableWrapper>>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadIEnumerableTOfArray() { IEnumerable<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<IEnumerable<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } // No way to populate this collection. 
await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<GenericIEnumerableWrapper<int[]>>(@"[[1,2],[3, 4]]")); } [Fact] public async Task ReadArrayOfIEnumerableT() { IEnumerable<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<IEnumerable<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IEnumerable<int> arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } // No way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<StringIEnumerableWrapper[]>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadSimpleGenericIEnumerable() { IEnumerable<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<IEnumerable<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<IEnumerable<int>>(@"[]"); Assert.Equal(0, result.Count()); // There is no way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<StringIEnumerableWrapper>(@"[""1"",""2""]")); await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<StringIEnumerableWrapper>(@"[]")); } [Fact] public async Task ReadIListTOfIListT() { IList<IList<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<IList<IList<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IList<int> ie in result) { foreach (int i in ie) { Assert.Equal(expected++, i); } } GenericIListWrapper<StringIListWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericIListWrapper<StringIListWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (StringIListWrapper il in result2) { foreach (string str in il) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadGenericIListOfArray() { IList<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<IList<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } GenericIListWrapper<string[]> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericIListWrapper<string[]>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (string[] arr in result2) { foreach (string str in arr) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadArrayOfIListT() { IList<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<IList<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IList<int> arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } StringIListWrapper[] result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringIListWrapper[]>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (StringIListWrapper il in result2) { foreach (string str in il) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadSimpleGenericIList() { IList<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<IList<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<IList<int>>(@"[]"); Assert.Equal(0, result.Count()); StringIListWrapper result2 = await 
JsonSerializerWrapperForString.DeserializeWrapper<StringIListWrapper>(@"[""1"",""2""]"); expected = 1; foreach (string str in result2) { Assert.Equal($"{expected++}", str); } result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringIListWrapper>(@"[]"); Assert.Equal(0, result2.Count()); } [Fact] public async Task ReadGenericStructIList() { string json = "[10,20,30]"; var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructIListWrapper<int>>(json); Assert.Equal(3, wrapper.Count); Assert.Equal(10, wrapper[0]); Assert.Equal(20, wrapper[1]); Assert.Equal(30, wrapper[2]); } [Fact] public async Task ReadNullableGenericStructIList() { string json = "[10,20,30]"; var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructIListWrapper<int>?>(json); Assert.True(wrapper.HasValue); Assert.Equal(3, wrapper.Value.Count); Assert.Equal(10, wrapper.Value[0]); Assert.Equal(20, wrapper.Value[1]); Assert.Equal(30, wrapper.Value[2]); } [Fact] public async Task ReadNullableGenericStructIListWithNullJson() { var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructIListWrapper<int>?>("null"); Assert.False(wrapper.HasValue); } [Fact] public async Task ReadGenericStructICollection() { string json = "[10,20,30]"; var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructICollectionWrapper<int>>(json); Assert.Equal(3, wrapper.Count); Assert.Equal(10, wrapper.ElementAt(0)); Assert.Equal(20, wrapper.ElementAt(1)); Assert.Equal(30, wrapper.ElementAt(2)); } [Fact] public async Task ReadNullableGenericStructICollection() { string json = "[10,20,30]"; var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructICollectionWrapper<int>?>(json); Assert.True(wrapper.HasValue); Assert.Equal(3, wrapper.Value.Count); Assert.Equal(10, wrapper.Value.ElementAt(0)); Assert.Equal(20, wrapper.Value.ElementAt(1)); Assert.Equal(30, wrapper.Value.ElementAt(2)); } [Fact] public async Task ReadNullableGenericStructICollectionWithNullJson() { var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructICollectionWrapper<int>?>("null"); Assert.False(wrapper.HasValue); } [Fact] public async Task ReadGenericICollectionOfGenericICollection() { ICollection<ICollection<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<ICollection<ICollection<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (ICollection<int> ie in result) { foreach (int i in ie) { Assert.Equal(expected++, i); } } GenericICollectionWrapper<GenericICollectionWrapper<string>> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericICollectionWrapper<GenericICollectionWrapper<string>>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (GenericICollectionWrapper<string> ic in result2) { foreach (string str in ic) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadGenericICollectionOfArray() { ICollection<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<ICollection<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } GenericICollectionWrapper<string[]> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericICollectionWrapper<string[]>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (string[] arr in result2) { foreach (string str in arr) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task 
ReadArrayOfGenericICollection() { ICollection<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<ICollection<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (ICollection<int> arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadSimpleGenericICollection() { ICollection<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<ICollection<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<ICollection<int>>(@"[]"); Assert.Equal(0, result.Count()); GenericICollectionWrapper<string> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericICollectionWrapper<string>>(@"[""1"",""2""]"); expected = 1; foreach (string str in result2) { Assert.Equal($"{expected++}", str); } result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericICollectionWrapper<string>>(@"[]"); Assert.Equal(0, result2.Count()); } [Fact] public async Task ReadGenericIReadOnlyCollectionOfGenericIReadOnlyCollection() { IReadOnlyCollection<IReadOnlyCollection<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyCollection<IReadOnlyCollection<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IReadOnlyCollection<int> ie in result) { foreach (int i in ie) { Assert.Equal(expected++, i); } } // There's no way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>( async () => await JsonSerializerWrapperForString.DeserializeWrapper<GenericIReadOnlyCollectionWrapper<WrapperForIReadOnlyCollectionOfT<string>>>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadGenericIReadOnlyCollectionOfArray() { IReadOnlyCollection<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyCollection<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<GenericIReadOnlyCollectionWrapper<int[]>>(@"[[1,2],[3,4]]")); } [Fact] public async Task ReadArrayOfIReadOnlyCollectionT() { IReadOnlyCollection<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyCollection<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IReadOnlyCollection<int> arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } // No way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<WrapperForIReadOnlyCollectionOfT<string>[]>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadGenericSimpleIReadOnlyCollection() { IReadOnlyCollection<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyCollection<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyCollection<int>>(@"[]"); Assert.Equal(0, result.Count()); // No way to populate this collection. 
await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<WrapperForIReadOnlyCollectionOfT<string>>(@"[""1"",""2""]")); } [Fact] public async Task ReadGenericIReadOnlyListOfGenericIReadOnlyList() { IReadOnlyList<IReadOnlyList<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyList<IReadOnlyList<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IReadOnlyList<int> ie in result) { foreach (int i in ie) { Assert.Equal(expected++, i); } } await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<GenericIReadOnlyListWrapper<StringIReadOnlyListWrapper>>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadGenericIReadOnlyListOfArray() { IReadOnlyList<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyList<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } // No way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<GenericIReadOnlyListWrapper<string[]>>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadArrayOfGenericIReadOnlyList() { IReadOnlyList<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyList<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (IReadOnlyList<int> arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } // No way to populate this collection. await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<StringIReadOnlyListWrapper[]>(@"[[""1"",""2""],[""3"",""4""]]")); } [Fact] public async Task ReadSimpleGenericIReadOnlyList() { IReadOnlyList<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyList<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyList<int>>(@"[]"); Assert.Equal(0, result.Count()); // No way to populate this collection. 
await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<StringIReadOnlyListWrapper>(@"[""1"",""2""]")); } [Fact] public async Task ReadGenericISetOfGenericISet() { ISet<ISet<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<ISet<ISet<int>>>(@"[[1,2],[3,4]]"); if (result.First().Contains(1)) { Assert.Equal(new HashSet<int> { 1, 2 }, result.First()); Assert.Equal(new HashSet<int> { 3, 4 }, result.Last()); } else { Assert.Equal(new HashSet<int> { 3, 4 }, result.First()); Assert.Equal(new HashSet<int> { 1, 2 }, result.Last()); } GenericISetWrapper<StringISetWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericISetWrapper<StringISetWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); if (result2.First().Contains("1")) { Assert.Equal(new HashSet<string> { "1", "2" }, (ISet<string>)result2.First()); Assert.Equal(new HashSet<string> { "3", "4" }, (ISet<string>)result2.Last()); } else { Assert.Equal(new HashSet<string> { "3", "4" }, (ISet<string>)result.First()); Assert.Equal(new HashSet<string> { "1", "2" }, (ISet<string>)result.Last()); } } [Fact] public async Task ReadGenericStructISet() { string json = "[10, 20, 30]"; var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructISetWrapper<int>>(json); Assert.Equal(3, wrapper.Count); Assert.Equal(10, wrapper.ElementAt(0)); Assert.Equal(20, wrapper.ElementAt(1)); Assert.Equal(30, wrapper.ElementAt(2)); } [Fact] public async Task ReadNullableGenericStructISet() { string json = "[10, 20, 30]"; var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructISetWrapper<int>?>(json); Assert.True(wrapper.HasValue); Assert.Equal(3, wrapper.Value.Count); Assert.Equal(10, wrapper.Value.ElementAt(0)); Assert.Equal(20, wrapper.Value.ElementAt(1)); Assert.Equal(30, wrapper.Value.ElementAt(2)); } [Fact] public async Task ReadNullableGenericStructISetWithNullJson() { var wrapper = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStructISetWrapper<int>?>("null"); Assert.False(wrapper.HasValue); } [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/50721", typeof(PlatformDetection), nameof(PlatformDetection.IsBuiltWithAggressiveTrimming), nameof(PlatformDetection.IsBrowser))] public async Task ReadISetTOfHashSetT() { ISet<HashSet<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<ISet<HashSet<int>>>(@"[[1,2],[3,4]]"); if (result.First().Contains(1)) { Assert.Equal(new HashSet<int> { 1, 2 }, result.First()); Assert.Equal(new HashSet<int> { 3, 4 }, result.Last()); } else { Assert.Equal(new HashSet<int> { 3, 4 }, result.First()); Assert.Equal(new HashSet<int> { 1, 2 }, result.Last()); } } [Fact] public async Task ReadHashSetTOfISetT() { HashSet<ISet<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<HashSet<ISet<int>>>(@"[[1,2],[3,4]]"); if (result.First().Contains(1)) { Assert.Equal(new HashSet<int> { 1, 2 }, result.First()); Assert.Equal(new HashSet<int> { 3, 4 }, result.Last()); } else { Assert.Equal(new HashSet<int> { 3, 4 }, result.First()); Assert.Equal(new HashSet<int> { 1, 2 }, result.Last()); } } [Fact] public async Task ReadISetTOfArray() { ISet<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<ISet<int[]>>(@"[[1,2],[3,4]]"); if (result.First().Contains(1)) { Assert.Equal(new HashSet<int> { 1, 2 }, result.First()); Assert.Equal(new HashSet<int> { 3, 4 }, result.Last()); } else { Assert.Equal(new HashSet<int> { 3, 4 }, 
result.First()); Assert.Equal(new HashSet<int> { 1, 2 }, result.Last()); } } [Fact] public async Task ReadArrayOfISetT() { ISet<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<ISet<int>[]>(@"[[1,2],[3,4]]"); Assert.Equal(new HashSet<int> { 1, 2 }, result.First()); Assert.Equal(new HashSet<int> { 3, 4 }, result.Last()); } [Fact] public async Task ReadSimpleISetT() { ISet<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<ISet<int>>(@"[1,2]"); Assert.Equal(new HashSet<int> { 1, 2 }, result); result = await JsonSerializerWrapperForString.DeserializeWrapper<ISet<int>>(@"[]"); Assert.Equal(0, result.Count()); } [Fact] public async Task StackTOfStackT() { Stack<Stack<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<Stack<Stack<int>>>(@"[[1,2],[3,4]]"); int expected = 4; foreach (Stack<int> st in result) { foreach (int i in st) { Assert.Equal(expected--, i); } } GenericStackWrapper<StringStackWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStackWrapper<StringStackWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 4; foreach (StringStackWrapper st in result2) { foreach (string str in st) { Assert.Equal($"{expected--}", str); } } } [Fact] public async Task ReadGenericStackOfArray() { Stack<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<Stack<int[]>>(@"[[1,2],[3,4]]"); int expected = 3; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } expected = 1; } GenericStackWrapper<string[]> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericStackWrapper<string[]>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 3; foreach (string[] arr in result2) { foreach (string str in arr) { Assert.Equal($"{expected++}", str); } expected = 1; } } [Fact] public async Task ReadArrayOfGenericStack() { Stack<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<Stack<int>[]>(@"[[1,2],[3,4]]"); int expected = 2; foreach (Stack<int> st in result) { foreach (int i in st) { Assert.Equal(expected--, i); } expected = 4; } StringStackWrapper[] result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringStackWrapper[]>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 2; foreach (StringStackWrapper st in result2) { foreach (string str in st) { Assert.Equal($"{expected--}", str); } expected = 4; } } [Fact] public async Task ReadSimpleGenericStack() { Stack<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<Stack<int>>(@"[1,2]"); int expected = 2; foreach (int i in result) { Assert.Equal(expected--, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<Stack<int>>(@"[]"); Assert.Equal(0, result.Count()); StringStackWrapper result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringStackWrapper>(@"[""1"",""2""]"); expected = 2; foreach (string str in result2) { Assert.Equal($"{expected--}", str); } result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringStackWrapper>(@"[]"); Assert.Equal(0, result2.Count()); } [Fact] public async Task ReadQueueTOfQueueT() { Queue<Queue<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<Queue<Queue<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (Queue<int> q in result) { foreach (int i in q) { Assert.Equal(expected++, i); } } GenericQueueWrapper<StringQueueWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericQueueWrapper<StringQueueWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); 
expected = 1; foreach (StringQueueWrapper q in result2) { foreach (string str in q) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadQueueTOfArray() { Queue<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<Queue<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadArrayOfIQueueT() { Queue<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<Queue<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (Queue<int> q in result) { foreach (int i in q) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadSimpleQueueT() { Queue<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<Queue<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<Queue<int>>(@"[]"); Assert.Equal(0, result.Count()); } [Fact] public async Task ReadHashSetTOfHashSetT() { HashSet<HashSet<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<HashSet<HashSet<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (HashSet<int> hs in result) { foreach (int i in hs) { Assert.Equal(expected++, i); } } GenericHashSetWrapper<StringHashSetWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericHashSetWrapper<StringHashSetWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (StringHashSetWrapper hs in result2) { foreach (string str in hs) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadHashSetTOfArray() { HashSet<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<HashSet<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadArrayOfIHashSetT() { HashSet<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<HashSet<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (HashSet<int> hs in result) { foreach (int i in hs) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadSimpleHashSetT() { HashSet<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<HashSet<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<HashSet<int>>(@"[]"); Assert.Equal(0, result.Count()); } [Fact] public async Task ReadGenericLinkedListOfGenericLinkedList() { LinkedList<LinkedList<int>> result = await JsonSerializerWrapperForString.DeserializeWrapper<LinkedList<LinkedList<int>>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (LinkedList<int> l in result) { foreach (int i in l) { Assert.Equal(expected++, i); } } GenericLinkedListWrapper<StringLinkedListWrapper> result2 = await JsonSerializerWrapperForString.DeserializeWrapper<GenericLinkedListWrapper<StringLinkedListWrapper>>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (StringLinkedListWrapper l in result2) { foreach (string str in l) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadLinkedListTOfArray() { LinkedList<int[]> result = await JsonSerializerWrapperForString.DeserializeWrapper<LinkedList<int[]>>(@"[[1,2],[3,4]]"); int expected = 1; foreach (int[] arr in result) { foreach (int i in arr) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadArrayOfILinkedListT() { LinkedList<int>[] result = 
await JsonSerializerWrapperForString.DeserializeWrapper<LinkedList<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (LinkedList<int> l in result) { foreach (int i in l) { Assert.Equal(expected++, i); } } } [Fact] public async Task ReadSimpleLinkedListT() { LinkedList<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<LinkedList<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<LinkedList<int>>(@"[]"); Assert.Equal(0, result.Count()); } [Fact] public async Task ReadArrayOfSortedSetT() { SortedSet<int>[] result = await JsonSerializerWrapperForString.DeserializeWrapper<SortedSet<int>[]>(@"[[1,2],[3,4]]"); int expected = 1; foreach (SortedSet<int> s in result) { foreach (int i in s) { Assert.Equal(expected++, i); } } StringSortedSetWrapper[] result2 = await JsonSerializerWrapperForString.DeserializeWrapper<StringSortedSetWrapper[]>(@"[[""1"",""2""],[""3"",""4""]]"); expected = 1; foreach (StringSortedSetWrapper s in result2) { foreach (string str in s) { Assert.Equal($"{expected++}", str); } } } [Fact] public async Task ReadSimpleSortedSetT() { SortedSet<int> result = await JsonSerializerWrapperForString.DeserializeWrapper<SortedSet<int>>(@"[1,2]"); int expected = 1; foreach (int i in result) { Assert.Equal(expected++, i); } result = await JsonSerializerWrapperForString.DeserializeWrapper<SortedSet<int>>(@"[]"); Assert.Equal(0, result.Count()); } [Fact] public async Task ReadClass_WithGenericStructCollectionWrapper_NullJson_Throws() { await Assert.ThrowsAsync<JsonException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<ClassWithGenericStructIListWrapper>(@"{ ""List"": null }")); await Assert.ThrowsAsync<JsonException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<ClassWithGenericStructICollectionWrapper>(@"{ ""Collection"": null }")); await Assert.ThrowsAsync<JsonException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<ClassWithGenericStructIDictionaryWrapper>(@"{ ""Dictionary"": null }")); await Assert.ThrowsAsync<JsonException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<ClassWithGenericStructISetWrapper>(@"{ ""Set"": null }")); } [Fact] public async Task ReadSimpleTestClass_GenericStructCollectionWrappers() { SimpleTestClassWithGenericStructCollectionWrappers obj = await JsonSerializerWrapperForString.DeserializeWrapper<SimpleTestClassWithGenericStructCollectionWrappers>(SimpleTestClassWithGenericStructCollectionWrappers.s_json); obj.Verify(); } [Fact] public async Task ReadSimpleTestStruct_NullableGenericStructCollectionWrappers() { { SimpleTestStructWithNullableGenericStructCollectionWrappers obj = await JsonSerializerWrapperForString.DeserializeWrapper<SimpleTestStructWithNullableGenericStructCollectionWrappers>(SimpleTestStructWithNullableGenericStructCollectionWrappers.s_json); obj.Verify(); } { string json = @"{" + @"""List"" : null," + @"""Collection"" : null," + @"""Set"" : null," + @"""Dictionary"" : null" + @"}"; SimpleTestStructWithNullableGenericStructCollectionWrappers obj = await JsonSerializerWrapperForString.DeserializeWrapper<SimpleTestStructWithNullableGenericStructCollectionWrappers>(json); Assert.False(obj.List.HasValue); Assert.False(obj.Collection.HasValue); Assert.False(obj.Set.HasValue); Assert.False(obj.Dictionary.HasValue); } } [Fact] public async Task ReadSimpleTestClass_GenericCollectionWrappers() { SimpleTestClassWithGenericCollectionWrappers 
obj = await JsonSerializerWrapperForString.DeserializeWrapper<SimpleTestClassWithGenericCollectionWrappers>(SimpleTestClassWithGenericCollectionWrappers.s_json); obj.Verify(); } [Theory] [MemberData(nameof(ReadSimpleTestClass_GenericWrappers_NoAddMethod))] public async Task ReadSimpleTestClass_GenericWrappers_NoAddMethod_Throws(Type type, string json, Type exceptionMessageType) { NotSupportedException ex = await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper(json, type)); Assert.Contains(exceptionMessageType.ToString(), ex.Message); } public static IEnumerable<object[]> ReadSimpleTestClass_GenericWrappers_NoAddMethod { get { yield return new object[] { typeof(SimpleTestClassWithStringIEnumerableWrapper), SimpleTestClassWithStringIEnumerableWrapper.s_json, typeof(StringIEnumerableWrapper) }; yield return new object[] { typeof(SimpleTestClassWithStringIReadOnlyCollectionWrapper), SimpleTestClassWithStringIReadOnlyCollectionWrapper.s_json, typeof(WrapperForIReadOnlyCollectionOfT<string>) }; yield return new object[] { typeof(SimpleTestClassWithStringIReadOnlyListWrapper), SimpleTestClassWithStringIReadOnlyListWrapper.s_json, typeof(StringIReadOnlyListWrapper) }; yield return new object[] { typeof(SimpleTestClassWithStringToStringIReadOnlyDictionaryWrapper), SimpleTestClassWithStringToStringIReadOnlyDictionaryWrapper.s_json, typeof(GenericIReadOnlyDictionaryWrapper<string, string>) }; } } [Theory] [InlineData(typeof(ReadOnlyWrapperForIList), @"[""1"", ""2""]")] [InlineData(typeof(ReadOnlyStringIListWrapper), @"[""1"", ""2""]")] [InlineData(typeof(ReadOnlyStringICollectionWrapper), @"[""1"", ""2""]")] [InlineData(typeof(ReadOnlyStringISetWrapper), @"[""1"", ""2""]")] [InlineData(typeof(ReadOnlyWrapperForIDictionary), @"{""Key"":""key"",""Value"":""value""}")] [InlineData(typeof(ReadOnlyStringToStringIDictionaryWrapper), @"{""Key"":""key"",""Value"":""value""}")] public async Task ReadReadOnlyCollections_Throws(Type type, string json) { NotSupportedException ex = await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper(json, type)); Assert.Contains(type.ToString(), ex.Message); } [Fact] public async Task Read_HigherOrderCollectionInheritance_Works() { const string json = "[\"test\"]"; Assert.Equal("test", (await JsonSerializerWrapperForString.DeserializeWrapper<string[]>(json))[0]); Assert.Equal("test", (await JsonSerializerWrapperForString.DeserializeWrapper<List<string>>(json)).First()); Assert.Equal("test", (await JsonSerializerWrapperForString.DeserializeWrapper<StringListWrapper>(json)).First()); Assert.Equal("test", (await JsonSerializerWrapperForString.DeserializeWrapper<GenericListWrapper<string>>(json)).First()); Assert.Equal("test", (await JsonSerializerWrapperForString.DeserializeWrapper<MyMyList<string>>(json)).First()); Assert.Equal("test", (await JsonSerializerWrapperForString.DeserializeWrapper<MyListString>(json)).First()); } [Theory] [InlineData(typeof(GenericIEnumerableWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericIEnumerableWrapperInternalConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericICollectionWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericICollectionWrapperInternalConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericIListWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericIListWrapperInternalConstructor<string>), @"[""1""]")] 
[InlineData(typeof(GenericISetWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericISetWrapperInternalConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericIDictionaryWrapperPrivateConstructor<string, string>), @"{""Key"":""Value""}")] [InlineData(typeof(GenericIDictionaryWrapperInternalConstructor<string, string>), @"{""Key"":""Value""}")] [InlineData(typeof(StringToStringIReadOnlyDictionaryWrapperPrivateConstructor), @"{""Key"":""Value""}")] [InlineData(typeof(StringToStringIReadOnlyDictionaryWrapperInternalConstructor), @"{""Key"":""Value""}")] [InlineData(typeof(GenericListWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericListWrapperInternalConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericQueueWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericQueueWrapperInternalConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericStackWrapperPrivateConstructor<string>), @"[""1""]")] [InlineData(typeof(GenericStackWrapperInternalConstructor<string>), @"[""1""]")] [InlineData(typeof(StringToGenericDictionaryWrapperPrivateConstructor<string>), @"{""Key"":""Value""}")] [InlineData(typeof(StringToGenericDictionaryWrapperInternalConstructor<string>), @"{""Key"":""Value""}")] public async Task Read_Generic_NoPublicConstructor_Throws(Type type, string json) { NotSupportedException ex = await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper(json, type)); Assert.Contains(type.ToString(), ex.Message); } [Fact] public async Task DoesNotCall_CollectionPropertyGetter_EveryTimeElementIsAdded() { var networkList = new List<string> { "Network1", "Network2" }; string serialized = await JsonSerializerWrapperForString.SerializeWrapper(new NetworkWrapper { NetworkList = networkList }); Assert.Equal(@"{""NetworkList"":[""Network1"",""Network2""]}", serialized); NetworkWrapper obj = await JsonSerializerWrapperForString.DeserializeWrapper<NetworkWrapper>(serialized); int i = 0; foreach (string network in obj.NetworkList) { Assert.Equal(networkList[i], network); i++; } } public class NetworkWrapper { private string _Networks = string.Empty; [JsonIgnore] public string Networks { get => _Networks; set => _Networks = value ?? string.Empty; } public IEnumerable<string> NetworkList { get => Networks.Split(','); set => Networks = value != null ? 
string.Join(",", value) : ""; } } [Fact] public async Task CollectionWith_BackingField_CanRoundtrip() { string json = "{\"AllowedGrantTypes\":[\"client_credentials\"]}"; Client obj = await JsonSerializerWrapperForString.DeserializeWrapper<Client>(json); Assert.Equal("client_credentials", obj.AllowedGrantTypes.First()); string serialized = await JsonSerializerWrapperForString.SerializeWrapper(obj); Assert.Equal(json, serialized); } public class Client { private ICollection<string> _allowedGrantTypes = new HashSetWithBackingCollection(); public ICollection<string> AllowedGrantTypes { get { return _allowedGrantTypes; } set { _allowedGrantTypes = new HashSetWithBackingCollection(value); } } } [Theory] [MemberData(nameof(CustomInterfaces_Enumerables))] public async Task CustomInterfacesNotSupported_Enumerables(Type type) { NotSupportedException ex = await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper("[]", type)); Assert.Contains(type.ToString(), ex.ToString()); } [Theory] [MemberData(nameof(CustomInterfaces_Dictionaries))] public async Task CustomInterfacesNotSupported_Dictionaries(Type type) { NotSupportedException ex = await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper("{}", type)); Assert.Contains(type.ToString(), ex.ToString()); } public static IEnumerable<object[]> CustomInterfaces_Enumerables() { yield return new object[] { typeof(IDerivedICollectionOfT<string>) }; yield return new object[] { typeof(IDerivedIList) }; yield return new object[] { typeof(IDerivedISetOfT<string>) }; } public static IEnumerable<object[]> CustomInterfaces_Dictionaries() { yield return new object[] { typeof(IDerivedIDictionaryOfTKeyTValue<string, string>) }; } [Fact] public async Task IReadOnlyDictionary_NotSupportedKey() { await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.DeserializeWrapper<IReadOnlyDictionary<Uri, int>>(@"{""http://foo"":1}")); await Assert.ThrowsAsync<NotSupportedException>(async () => await JsonSerializerWrapperForString.SerializeWrapper(new GenericIReadOnlyDictionaryWrapper<Uri, int>(new Dictionary<Uri, int> { { new Uri("http://foo"), 1 } }))); } } }
-1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/tests/JIT/Directed/nullabletypes/isinstvaluetype_do.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="isinstvaluetype.cs" /> <Compile Include="Desktop\StructDefinitions.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="isinstvaluetype.cs" /> <Compile Include="Desktop\StructDefinitions.cs" /> </ItemGroup> </Project>
-1
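The PR description repeated in the record above leans on the notion of an "atomic" node: one that, once matched, never gives anything back, so no backtracking code needs to be emitted for it. As a minimal, self-contained illustration of that distinction (ordinary Regex usage, not the runtime's compiler or source-generator code), the following C# program contrasts a greedy single-character loop, which backtracks, with an explicitly atomic group, which does not:

using System;
using System.Text.RegularExpressions;

class AtomicityDemo
{
    static void Main()
    {
        // Greedy loop: \d+ first consumes "123", then gives one digit back on
        // backtracking so the trailing literal '3' can still match => True.
        Console.WriteLine(Regex.IsMatch("123", @"^\d+3$"));

        // Atomic group: (?>\d+) keeps all three digits and never gives one back,
        // so the trailing '3' has nothing left to match => False.
        Console.WriteLine(Regex.IsMatch("123", @"^(?>\d+)3$"));
    }
}

A node the optimizer can prove behaves like the second form needs no backtracking support in the generated matcher, which is exactly the property on which RegexCompiler and the source generator have to agree.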
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/System.Text.Json/tests/System.Text.Json.FSharp.Tests/System.Text.Json.FSharp.Tests.fsproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)</TargetFramework> </PropertyGroup> <ItemGroup> <Compile Include="Helpers.fs" /> <Compile Include="OptionTests.fs" /> <Compile Include="ValueOptionTests.fs" /> <Compile Include="CollectionTests.fs" /> <Compile Include="RecordTests.fs" /> <Compile Include="UnionTests.fs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)</TargetFramework> </PropertyGroup> <ItemGroup> <Compile Include="Helpers.fs" /> <Compile Include="OptionTests.fs" /> <Compile Include="ValueOptionTests.fs" /> <Compile Include="CollectionTests.fs" /> <Compile Include="RecordTests.fs" /> <Compile Include="UnionTests.fs" /> </ItemGroup> </Project>
-1
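Fix (1) in the description above says the single-character-loop emitter should consult the same atomicity analysis that the rest of code generation uses, so parent and child can never disagree. The following is a hypothetical sketch of that idea only; the type and member names are invented for illustration and are not the actual RegexCompiler or RegexGenerator APIs:

using System.Collections.Generic;

enum LoopKind { Backtracking, Atomic }

sealed class RegexNodeSketch
{
    public LoopKind Kind;
    public List<RegexNodeSketch> Children = new();
}

sealed class LoopEmitterSketch
{
    // Populated by a separate analysis pass over the final node tree.
    private readonly HashSet<RegexNodeSketch> _treatedAsAtomic;

    public LoopEmitterSketch(HashSet<RegexNodeSketch> treatedAsAtomic) =>
        _treatedAsAtomic = treatedAsAtomic;

    public string EmitSingleCharLoop(RegexNodeSketch node)
    {
        // If either the node itself is marked atomic or the shared analysis says
        // the rest of generation will treat it as atomic, render the atomic form:
        // consume greedily, with no resume label and no saved backtracking state.
        if (node.Kind == LoopKind.Atomic || _treatedAsAtomic.Contains(node))
        {
            return "/* greedy consume, no backtracking state */";
        }

        // Otherwise emit the full backtracking form: remember how many iterations
        // matched so the match can later be retried with fewer of them.
        return "/* loop body + saved iteration count + resume label */";
    }
}

The real emitters produce IL or C# rather than strings; the point is only that the decision to render the atomic form is read from one shared source of truth, so a parent that assumes its child cannot backtrack is never wrong.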
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/workloads/VSSetup.props
<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <PropertyGroup> <VSSetupProps>1</VSSetupProps> </PropertyGroup> <PropertyGroup> <VSDropServiceUri>https://vsdrop.corp.microsoft.com/file/v1/</VSDropServiceUri> <DropServiceUri>https://devdiv.artifacts.visualstudio.com/</DropServiceUri> <DropExe>$(MSBuildThisDirectory)Tools\Drop.App\lib\net45\Drop.exe</DropExe> <!-- Default drop expiration date is 10 years from now --> <DropExpiration Condition="'$(DropExpiration)' == ''">10</DropExpiration> <DropExpirationDate>$([System.DateTime]::Now.AddYears($(DropExpiration)).ToString("M/d/yyyy h:m:s tt"))</DropExpirationDate> <!-- Timeout in minutes --> <DropTimeout>10</DropTimeout> <!-- Can be set to 'info', 'warn', 'error', 'verbose' --> <DropTraceLevel>verbose</DropTraceLevel> <!-- Commandline parameters for drop.exe --> <DropParamService>-s &quot;$(DropServiceUri)&quot;</DropParamService> <DropParamTimeout>--timeout &quot;$(DropTimeout)&quot;</DropParamTimeout> <DropParamTraceLevel>--tracelevel &quot;$(DropTraceLevel)&quot;</DropParamTraceLevel> <DropParamExpirationDate>-x &quot;$(DropExpirationDate)&quot;</DropParamExpirationDate> <!-- Use AAD for authentication --> <DropParamAuth>-a</DropParamAuth> </PropertyGroup> <PropertyGroup> <ManifestTeamProject Condition="'$(ManifestTeamProject)' == ''">dotnet</ManifestTeamProject> <ManifestRepositoryName Condition="'$(ManifestRepositoryName)' == ''">installer</ManifestRepositoryName> <ManifestBuildBranch Condition="'$(ManifestBuildBranch)' == ''">local_build</ManifestBuildBranch> <ManifestBuildNumber Condition="'$(ManifestBuildNumber)' == ''">$([System.DateTime]::Now.ToString("yyMMdd")).1</ManifestBuildNumber> </PropertyGroup> <PropertyGroup> <ManifestPublishUrl>https://vsdrop.corp.microsoft.com/file/v1/Products/$(ManifestTeamProject)/$(ManifestRepositoryName)/$(ManifestBuildBranch)/$(ManifestBuildNumber);</ManifestPublishUrl> </PropertyGroup> <PropertyGroup> <ManifestIntermediateOutputPath>$(OutputPath)\obj\$(MSBuildProject)</ManifestIntermediateOutputPath> </PropertyGroup> </Project>
<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <PropertyGroup> <VSSetupProps>1</VSSetupProps> </PropertyGroup> <PropertyGroup> <VSDropServiceUri>https://vsdrop.corp.microsoft.com/file/v1/</VSDropServiceUri> <DropServiceUri>https://devdiv.artifacts.visualstudio.com/</DropServiceUri> <DropExe>$(MSBuildThisDirectory)Tools\Drop.App\lib\net45\Drop.exe</DropExe> <!-- Default drop expiration date is 10 years from now --> <DropExpiration Condition="'$(DropExpiration)' == ''">10</DropExpiration> <DropExpirationDate>$([System.DateTime]::Now.AddYears($(DropExpiration)).ToString("M/d/yyyy h:m:s tt"))</DropExpirationDate> <!-- Timeout in minutes --> <DropTimeout>10</DropTimeout> <!-- Can be set to 'info', 'warn', 'error', 'verbose' --> <DropTraceLevel>verbose</DropTraceLevel> <!-- Commandline parameters for drop.exe --> <DropParamService>-s &quot;$(DropServiceUri)&quot;</DropParamService> <DropParamTimeout>--timeout &quot;$(DropTimeout)&quot;</DropParamTimeout> <DropParamTraceLevel>--tracelevel &quot;$(DropTraceLevel)&quot;</DropParamTraceLevel> <DropParamExpirationDate>-x &quot;$(DropExpirationDate)&quot;</DropParamExpirationDate> <!-- Use AAD for authentication --> <DropParamAuth>-a</DropParamAuth> </PropertyGroup> <PropertyGroup> <ManifestTeamProject Condition="'$(ManifestTeamProject)' == ''">dotnet</ManifestTeamProject> <ManifestRepositoryName Condition="'$(ManifestRepositoryName)' == ''">installer</ManifestRepositoryName> <ManifestBuildBranch Condition="'$(ManifestBuildBranch)' == ''">local_build</ManifestBuildBranch> <ManifestBuildNumber Condition="'$(ManifestBuildNumber)' == ''">$([System.DateTime]::Now.ToString("yyMMdd")).1</ManifestBuildNumber> </PropertyGroup> <PropertyGroup> <ManifestPublishUrl>https://vsdrop.corp.microsoft.com/file/v1/Products/$(ManifestTeamProject)/$(ManifestRepositoryName)/$(ManifestBuildBranch)/$(ManifestBuildNumber);</ManifestPublishUrl> </PropertyGroup> <PropertyGroup> <ManifestIntermediateOutputPath>$(OutputPath)\obj\$(MSBuildProject)</ManifestIntermediateOutputPath> </PropertyGroup> </Project>
-1
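Fix (2) in the same description says the pass that eliminates backtracking at the end of atomic regions should also recurse into lookarounds, since a lookaround's body is never re-entered once the lookaround has been evaluated. A hypothetical sketch of such a pass follows; the node shape and names are illustrative, not the real RegexNode type:

using System.Collections.Generic;

enum SketchKind { Concatenate, Loop, AtomicLoop, PositiveLookaround, NegativeLookaround, Other }

sealed class SketchNode
{
    public SketchKind Kind;
    public List<SketchNode> Children = new();
}

static class EndingBacktrackingElimination
{
    // Called on a node that ends an atomic region: any backtracking state it sets
    // up can never be consumed, so it may as well be rendered atomically.
    public static void MarkEndingNodesAtomic(SketchNode node)
    {
        switch (node.Kind)
        {
            case SketchKind.Concatenate:
                // Only the last child of a concatenation ends the region.
                if (node.Children.Count > 0)
                    MarkEndingNodesAtomic(node.Children[node.Children.Count - 1]);
                break;

            case SketchKind.Loop:
                node.Kind = SketchKind.AtomicLoop;
                break;

            case SketchKind.PositiveLookaround:
            case SketchKind.NegativeLookaround:
                // The added behavior: descend into the lookaround's body, because a
                // lookaround is atomic once evaluated, so whatever ends its body
                // cannot benefit from backtracking either.
                if (node.Children.Count == 1)
                    MarkEndingNodesAtomic(node.Children[0]);
                break;
        }
    }
}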
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/System.Private.CoreLib/src/System/Diagnostics/CodeAnalysis/DynamicallyAccessedMemberTypes.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Diagnostics.CodeAnalysis { /// <summary> /// Specifies the types of members that are dynamically accessed. /// /// This enumeration has a <see cref="FlagsAttribute"/> attribute that allows a /// bitwise combination of its member values. /// </summary> [Flags] #if SYSTEM_PRIVATE_CORELIB public #else internal #endif enum DynamicallyAccessedMemberTypes { /// <summary> /// Specifies no members. /// </summary> None = 0, /// <summary> /// Specifies the default, parameterless public constructor. /// </summary> PublicParameterlessConstructor = 0x0001, /// <summary> /// Specifies all public constructors. /// </summary> PublicConstructors = 0x0002 | PublicParameterlessConstructor, /// <summary> /// Specifies all non-public constructors. /// </summary> NonPublicConstructors = 0x0004, /// <summary> /// Specifies all public methods. /// </summary> PublicMethods = 0x0008, /// <summary> /// Specifies all non-public methods. /// </summary> NonPublicMethods = 0x0010, /// <summary> /// Specifies all public fields. /// </summary> PublicFields = 0x0020, /// <summary> /// Specifies all non-public fields. /// </summary> NonPublicFields = 0x0040, /// <summary> /// Specifies all public nested types. /// </summary> PublicNestedTypes = 0x0080, /// <summary> /// Specifies all non-public nested types. /// </summary> NonPublicNestedTypes = 0x0100, /// <summary> /// Specifies all public properties. /// </summary> PublicProperties = 0x0200, /// <summary> /// Specifies all non-public properties. /// </summary> NonPublicProperties = 0x0400, /// <summary> /// Specifies all public events. /// </summary> PublicEvents = 0x0800, /// <summary> /// Specifies all non-public events. /// </summary> NonPublicEvents = 0x1000, /// <summary> /// Specifies all interfaces implemented by the type. /// </summary> Interfaces = 0x2000, /// <summary> /// Specifies all members. /// </summary> All = ~None } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Diagnostics.CodeAnalysis { /// <summary> /// Specifies the types of members that are dynamically accessed. /// /// This enumeration has a <see cref="FlagsAttribute"/> attribute that allows a /// bitwise combination of its member values. /// </summary> [Flags] #if SYSTEM_PRIVATE_CORELIB public #else internal #endif enum DynamicallyAccessedMemberTypes { /// <summary> /// Specifies no members. /// </summary> None = 0, /// <summary> /// Specifies the default, parameterless public constructor. /// </summary> PublicParameterlessConstructor = 0x0001, /// <summary> /// Specifies all public constructors. /// </summary> PublicConstructors = 0x0002 | PublicParameterlessConstructor, /// <summary> /// Specifies all non-public constructors. /// </summary> NonPublicConstructors = 0x0004, /// <summary> /// Specifies all public methods. /// </summary> PublicMethods = 0x0008, /// <summary> /// Specifies all non-public methods. /// </summary> NonPublicMethods = 0x0010, /// <summary> /// Specifies all public fields. /// </summary> PublicFields = 0x0020, /// <summary> /// Specifies all non-public fields. /// </summary> NonPublicFields = 0x0040, /// <summary> /// Specifies all public nested types. /// </summary> PublicNestedTypes = 0x0080, /// <summary> /// Specifies all non-public nested types. /// </summary> NonPublicNestedTypes = 0x0100, /// <summary> /// Specifies all public properties. /// </summary> PublicProperties = 0x0200, /// <summary> /// Specifies all non-public properties. /// </summary> NonPublicProperties = 0x0400, /// <summary> /// Specifies all public events. /// </summary> PublicEvents = 0x0800, /// <summary> /// Specifies all non-public events. /// </summary> NonPublicEvents = 0x1000, /// <summary> /// Specifies all interfaces implemented by the type. /// </summary> Interfaces = 0x2000, /// <summary> /// Specifies all members. /// </summary> All = ~None } }
-1
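The DynamicallyAccessedMemberTypes enum in the record above is consumed through DynamicallyAccessedMembersAttribute annotations, which tell trimming tools which members of a Type must be preserved. A small hedged example of the common usage pattern (the helper name is invented for illustration):

using System;
using System.Diagnostics.CodeAnalysis;

static class ActivatorHelper
{
    // Annotating the Type parameter with PublicParameterlessConstructor tells the
    // trimmer to keep the default constructor of whatever type callers pass in,
    // which is what Activator.CreateInstance needs here.
    public static object CreateViaDefaultCtor(
        [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]
        Type type)
        => Activator.CreateInstance(type)!;
}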
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer loop tests that source gen our full regex corpus caught a case where this was happening. A couple fixes, either of which on their own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/System.Diagnostics.TraceSource/ref/System.Diagnostics.TraceSource.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ------------------------------------------------------------------------------ // Changes to this file must follow the https://aka.ms/api-review process. // ------------------------------------------------------------------------------ namespace System.Diagnostics { [System.Diagnostics.SwitchLevelAttribute(typeof(bool))] public partial class BooleanSwitch : System.Diagnostics.Switch { public BooleanSwitch(string displayName, string? description) : base (default(string), default(string)) { } public BooleanSwitch(string displayName, string? description, string defaultSwitchValue) : base (default(string), default(string)) { } public bool Enabled { get { throw null; } set { } } protected override void OnValueChanged() { } } public partial class CorrelationManager { internal CorrelationManager() { } public System.Guid ActivityId { get { throw null; } set { } } public System.Collections.Stack LogicalOperationStack { get { throw null; } } public void StartLogicalOperation() { } public void StartLogicalOperation(object operationId) { } public void StopLogicalOperation() { } } public partial class DefaultTraceListener : System.Diagnostics.TraceListener { public DefaultTraceListener() { } public bool AssertUiEnabled { get { throw null; } set { } } public string? LogFileName { get { throw null; } set { } } public override void Fail(string? message) { } public override void Fail(string? message, string? detailMessage) { } public override void Write(string? message) { } public override void WriteLine(string? message) { } } public partial class EventTypeFilter : System.Diagnostics.TraceFilter { public EventTypeFilter(System.Diagnostics.SourceLevels level) { } public System.Diagnostics.SourceLevels EventType { get { throw null; } set { } } public override bool ShouldTrace(System.Diagnostics.TraceEventCache? cache, string source, System.Diagnostics.TraceEventType eventType, int id, string? formatOrMessage, object?[]? args, object? data1, object?[]? data) { throw null; } } public partial class SourceFilter : System.Diagnostics.TraceFilter { public SourceFilter(string source) { } public string Source { get { throw null; } set { } } public override bool ShouldTrace(System.Diagnostics.TraceEventCache? cache, string source, System.Diagnostics.TraceEventType eventType, int id, string? formatOrMessage, object?[]? args, object? data1, object?[]? data) { throw null; } } [System.FlagsAttribute] public enum SourceLevels { All = -1, Off = 0, Critical = 1, Error = 3, Warning = 7, Information = 15, Verbose = 31, [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] ActivityTracing = 65280, } public partial class SourceSwitch : System.Diagnostics.Switch { public SourceSwitch(string name) : base (default(string), default(string)) { } public SourceSwitch(string displayName, string defaultSwitchValue) : base (default(string), default(string)) { } public System.Diagnostics.SourceLevels Level { get { throw null; } set { } } protected override void OnValueChanged() { } public bool ShouldTrace(System.Diagnostics.TraceEventType eventType) { throw null; } } public abstract partial class Switch { protected Switch(string displayName, string? description) { } protected Switch(string displayName, string? 
description, string defaultSwitchValue) { } public System.Collections.Specialized.StringDictionary Attributes { get { throw null; } } public string Description { get { throw null; } } public string DisplayName { get { throw null; } } protected int SwitchSetting { get { throw null; } set { } } protected string Value { get { throw null; } set { } } protected virtual string[]? GetSupportedAttributes() { throw null; } protected virtual void OnSwitchSettingChanged() { } protected virtual void OnValueChanged() { } } [System.AttributeUsageAttribute(System.AttributeTargets.Assembly | System.AttributeTargets.Class | System.AttributeTargets.Constructor | System.AttributeTargets.Event | System.AttributeTargets.Method | System.AttributeTargets.Property)] public sealed partial class SwitchAttribute : System.Attribute { public SwitchAttribute(string switchName, System.Type switchType) { } public string? SwitchDescription { get { throw null; } set { } } public string SwitchName { get { throw null; } set { } } public System.Type SwitchType { get { throw null; } set { } } [System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("Types may be trimmed from the assembly.")] public static System.Diagnostics.SwitchAttribute[] GetAll(System.Reflection.Assembly assembly) { throw null; } } [System.AttributeUsageAttribute(System.AttributeTargets.Class)] public sealed partial class SwitchLevelAttribute : System.Attribute { public SwitchLevelAttribute(System.Type switchLevelType) { } public System.Type SwitchLevelType { get { throw null; } set { } } } public sealed partial class Trace { internal Trace() { } public static bool AutoFlush { get { throw null; } set { } } public static System.Diagnostics.CorrelationManager CorrelationManager { get { throw null; } } public static int IndentLevel { get { throw null; } set { } } public static int IndentSize { get { throw null; } set { } } public static System.Diagnostics.TraceListenerCollection Listeners { get { throw null; } } public static bool UseGlobalLock { get { throw null; } set { } } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Assert(bool condition) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Assert(bool condition, string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Assert(bool condition, string? message, string? detailMessage) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Close() { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Fail(string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Fail(string? message, string? detailMessage) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Flush() { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Indent() { } public static void Refresh() { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void TraceError(string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void TraceError(string format, params object?[]? args) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void TraceInformation(string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void TraceInformation(string format, params object?[]? args) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void TraceWarning(string? 
message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void TraceWarning(string format, params object?[]? args) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Unindent() { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Write(object? value) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Write(object? value, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Write(string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Write(string? message, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteIf(bool condition, object? value) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteIf(bool condition, object? value, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteIf(bool condition, string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteIf(bool condition, string? message, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLine(object? value) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLine(object? value, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLine(string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLine(string? message, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLineIf(bool condition, object? value) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLineIf(bool condition, object? value, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLineIf(bool condition, string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLineIf(bool condition, string? message, string? category) { } } public partial class TraceEventCache { public TraceEventCache() { } public string Callstack { get { throw null; } } public System.DateTime DateTime { get { throw null; } } public System.Collections.Stack LogicalOperationStack { get { throw null; } } public int ProcessId { get { throw null; } } public string ThreadId { get { throw null; } } public long Timestamp { get { throw null; } } } public enum TraceEventType { Critical = 1, Error = 2, Warning = 4, Information = 8, Verbose = 16, Start = 256, Stop = 512, Suspend = 1024, Resume = 2048, Transfer = 4096, } public abstract partial class TraceFilter { protected TraceFilter() { } public abstract bool ShouldTrace(System.Diagnostics.TraceEventCache? cache, string source, System.Diagnostics.TraceEventType eventType, int id, string? formatOrMessage, object?[]? args, object? data1, object?[]? data); } public enum TraceLevel { Off = 0, Error = 1, Warning = 2, Info = 3, Verbose = 4, } public abstract partial class TraceListener : System.MarshalByRefObject, System.IDisposable { protected TraceListener() { } protected TraceListener(string? name) { } public System.Collections.Specialized.StringDictionary Attributes { get { throw null; } } public System.Diagnostics.TraceFilter? 
Filter { get { throw null; } set { } } public int IndentLevel { get { throw null; } set { } } public int IndentSize { get { throw null; } set { } } public virtual bool IsThreadSafe { get { throw null; } } [System.Diagnostics.CodeAnalysis.AllowNullAttribute] public virtual string Name { get { throw null; } set { } } protected bool NeedIndent { get { throw null; } set { } } public System.Diagnostics.TraceOptions TraceOutputOptions { get { throw null; } set { } } public virtual void Close() { } public void Dispose() { } protected virtual void Dispose(bool disposing) { } public virtual void Fail(string? message) { } public virtual void Fail(string? message, string? detailMessage) { } public virtual void Flush() { } protected virtual string[]? GetSupportedAttributes() { throw null; } public virtual void TraceData(System.Diagnostics.TraceEventCache? eventCache, string source, System.Diagnostics.TraceEventType eventType, int id, object? data) { } public virtual void TraceData(System.Diagnostics.TraceEventCache? eventCache, string source, System.Diagnostics.TraceEventType eventType, int id, params object?[]? data) { } public virtual void TraceEvent(System.Diagnostics.TraceEventCache? eventCache, string source, System.Diagnostics.TraceEventType eventType, int id) { } public virtual void TraceEvent(System.Diagnostics.TraceEventCache? eventCache, string source, System.Diagnostics.TraceEventType eventType, int id, string? message) { } public virtual void TraceEvent(System.Diagnostics.TraceEventCache? eventCache, string source, System.Diagnostics.TraceEventType eventType, int id, string? format, params object?[]? args) { } public virtual void TraceTransfer(System.Diagnostics.TraceEventCache? eventCache, string source, int id, string? message, System.Guid relatedActivityId) { } public virtual void Write(object? o) { } public virtual void Write(object? o, string? category) { } public abstract void Write(string? message); public virtual void Write(string? message, string? category) { } protected virtual void WriteIndent() { } public virtual void WriteLine(object? o) { } public virtual void WriteLine(object? o, string? category) { } public abstract void WriteLine(string? message); public virtual void WriteLine(string? message, string? category) { } } public partial class TraceListenerCollection : System.Collections.ICollection, System.Collections.IEnumerable, System.Collections.IList { internal TraceListenerCollection() { } public int Count { get { throw null; } } public System.Diagnostics.TraceListener this[int i] { get { throw null; } set { } } public System.Diagnostics.TraceListener? this[string name] { get { throw null; } } bool System.Collections.ICollection.IsSynchronized { get { throw null; } } object System.Collections.ICollection.SyncRoot { get { throw null; } } bool System.Collections.IList.IsFixedSize { get { throw null; } } bool System.Collections.IList.IsReadOnly { get { throw null; } } object? System.Collections.IList.this[int index] { get { throw null; } set { } } public int Add(System.Diagnostics.TraceListener listener) { throw null; } public void AddRange(System.Diagnostics.TraceListenerCollection value) { } public void AddRange(System.Diagnostics.TraceListener[] value) { } public void Clear() { } public bool Contains(System.Diagnostics.TraceListener? listener) { throw null; } public void CopyTo(System.Diagnostics.TraceListener[] listeners, int index) { } public System.Collections.IEnumerator GetEnumerator() { throw null; } public int IndexOf(System.Diagnostics.TraceListener? 
listener) { throw null; } public void Insert(int index, System.Diagnostics.TraceListener listener) { } public void Remove(System.Diagnostics.TraceListener? listener) { } public void Remove(string name) { } public void RemoveAt(int index) { } void System.Collections.ICollection.CopyTo(System.Array array, int index) { } int System.Collections.IList.Add(object? value) { throw null; } bool System.Collections.IList.Contains(object? value) { throw null; } int System.Collections.IList.IndexOf(object? value) { throw null; } void System.Collections.IList.Insert(int index, object? value) { } void System.Collections.IList.Remove(object? value) { } } [System.FlagsAttribute] public enum TraceOptions { None = 0, LogicalOperationStack = 1, DateTime = 2, Timestamp = 4, ProcessId = 8, ThreadId = 16, Callstack = 32, } public partial class TraceSource { public TraceSource(string name) { } public TraceSource(string name, System.Diagnostics.SourceLevels defaultLevel) { } public System.Collections.Specialized.StringDictionary Attributes { get { throw null; } } public System.Diagnostics.TraceListenerCollection Listeners { get { throw null; } } public string Name { get { throw null; } } public System.Diagnostics.SourceSwitch Switch { get { throw null; } set { } } public void Close() { } public void Flush() { } protected virtual string[]? GetSupportedAttributes() { throw null; } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceData(System.Diagnostics.TraceEventType eventType, int id, object? data) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceData(System.Diagnostics.TraceEventType eventType, int id, params object?[]? data) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceEvent(System.Diagnostics.TraceEventType eventType, int id) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceEvent(System.Diagnostics.TraceEventType eventType, int id, string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceEvent(System.Diagnostics.TraceEventType eventType, int id, string? format, params object?[]? args) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceInformation(string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceInformation(string? format, params object?[]? args) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceTransfer(int id, string? message, System.Guid relatedActivityId) { } } [System.Diagnostics.SwitchLevelAttribute(typeof(System.Diagnostics.TraceLevel))] public partial class TraceSwitch : System.Diagnostics.Switch { public TraceSwitch(string displayName, string? description) : base (default(string), default(string)) { } public TraceSwitch(string displayName, string? description, string defaultSwitchValue) : base (default(string), default(string)) { } public System.Diagnostics.TraceLevel Level { get { throw null; } set { } } public bool TraceError { get { throw null; } } public bool TraceInfo { get { throw null; } } public bool TraceVerbose { get { throw null; } } public bool TraceWarning { get { throw null; } } protected override void OnSwitchSettingChanged() { } protected override void OnValueChanged() { } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ------------------------------------------------------------------------------ // Changes to this file must follow the https://aka.ms/api-review process. // ------------------------------------------------------------------------------ namespace System.Diagnostics { [System.Diagnostics.SwitchLevelAttribute(typeof(bool))] public partial class BooleanSwitch : System.Diagnostics.Switch { public BooleanSwitch(string displayName, string? description) : base (default(string), default(string)) { } public BooleanSwitch(string displayName, string? description, string defaultSwitchValue) : base (default(string), default(string)) { } public bool Enabled { get { throw null; } set { } } protected override void OnValueChanged() { } } public partial class CorrelationManager { internal CorrelationManager() { } public System.Guid ActivityId { get { throw null; } set { } } public System.Collections.Stack LogicalOperationStack { get { throw null; } } public void StartLogicalOperation() { } public void StartLogicalOperation(object operationId) { } public void StopLogicalOperation() { } } public partial class DefaultTraceListener : System.Diagnostics.TraceListener { public DefaultTraceListener() { } public bool AssertUiEnabled { get { throw null; } set { } } public string? LogFileName { get { throw null; } set { } } public override void Fail(string? message) { } public override void Fail(string? message, string? detailMessage) { } public override void Write(string? message) { } public override void WriteLine(string? message) { } } public partial class EventTypeFilter : System.Diagnostics.TraceFilter { public EventTypeFilter(System.Diagnostics.SourceLevels level) { } public System.Diagnostics.SourceLevels EventType { get { throw null; } set { } } public override bool ShouldTrace(System.Diagnostics.TraceEventCache? cache, string source, System.Diagnostics.TraceEventType eventType, int id, string? formatOrMessage, object?[]? args, object? data1, object?[]? data) { throw null; } } public partial class SourceFilter : System.Diagnostics.TraceFilter { public SourceFilter(string source) { } public string Source { get { throw null; } set { } } public override bool ShouldTrace(System.Diagnostics.TraceEventCache? cache, string source, System.Diagnostics.TraceEventType eventType, int id, string? formatOrMessage, object?[]? args, object? data1, object?[]? data) { throw null; } } [System.FlagsAttribute] public enum SourceLevels { All = -1, Off = 0, Critical = 1, Error = 3, Warning = 7, Information = 15, Verbose = 31, [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] ActivityTracing = 65280, } public partial class SourceSwitch : System.Diagnostics.Switch { public SourceSwitch(string name) : base (default(string), default(string)) { } public SourceSwitch(string displayName, string defaultSwitchValue) : base (default(string), default(string)) { } public System.Diagnostics.SourceLevels Level { get { throw null; } set { } } protected override void OnValueChanged() { } public bool ShouldTrace(System.Diagnostics.TraceEventType eventType) { throw null; } } public abstract partial class Switch { protected Switch(string displayName, string? description) { } protected Switch(string displayName, string? 
description, string defaultSwitchValue) { } public System.Collections.Specialized.StringDictionary Attributes { get { throw null; } } public string Description { get { throw null; } } public string DisplayName { get { throw null; } } protected int SwitchSetting { get { throw null; } set { } } protected string Value { get { throw null; } set { } } protected virtual string[]? GetSupportedAttributes() { throw null; } protected virtual void OnSwitchSettingChanged() { } protected virtual void OnValueChanged() { } } [System.AttributeUsageAttribute(System.AttributeTargets.Assembly | System.AttributeTargets.Class | System.AttributeTargets.Constructor | System.AttributeTargets.Event | System.AttributeTargets.Method | System.AttributeTargets.Property)] public sealed partial class SwitchAttribute : System.Attribute { public SwitchAttribute(string switchName, System.Type switchType) { } public string? SwitchDescription { get { throw null; } set { } } public string SwitchName { get { throw null; } set { } } public System.Type SwitchType { get { throw null; } set { } } [System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("Types may be trimmed from the assembly.")] public static System.Diagnostics.SwitchAttribute[] GetAll(System.Reflection.Assembly assembly) { throw null; } } [System.AttributeUsageAttribute(System.AttributeTargets.Class)] public sealed partial class SwitchLevelAttribute : System.Attribute { public SwitchLevelAttribute(System.Type switchLevelType) { } public System.Type SwitchLevelType { get { throw null; } set { } } } public sealed partial class Trace { internal Trace() { } public static bool AutoFlush { get { throw null; } set { } } public static System.Diagnostics.CorrelationManager CorrelationManager { get { throw null; } } public static int IndentLevel { get { throw null; } set { } } public static int IndentSize { get { throw null; } set { } } public static System.Diagnostics.TraceListenerCollection Listeners { get { throw null; } } public static bool UseGlobalLock { get { throw null; } set { } } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Assert(bool condition) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Assert(bool condition, string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Assert(bool condition, string? message, string? detailMessage) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Close() { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Fail(string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Fail(string? message, string? detailMessage) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Flush() { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Indent() { } public static void Refresh() { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void TraceError(string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void TraceError(string format, params object?[]? args) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void TraceInformation(string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void TraceInformation(string format, params object?[]? args) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void TraceWarning(string? 
message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void TraceWarning(string format, params object?[]? args) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Unindent() { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Write(object? value) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Write(object? value, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Write(string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void Write(string? message, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteIf(bool condition, object? value) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteIf(bool condition, object? value, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteIf(bool condition, string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteIf(bool condition, string? message, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLine(object? value) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLine(object? value, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLine(string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLine(string? message, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLineIf(bool condition, object? value) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLineIf(bool condition, object? value, string? category) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLineIf(bool condition, string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public static void WriteLineIf(bool condition, string? message, string? category) { } } public partial class TraceEventCache { public TraceEventCache() { } public string Callstack { get { throw null; } } public System.DateTime DateTime { get { throw null; } } public System.Collections.Stack LogicalOperationStack { get { throw null; } } public int ProcessId { get { throw null; } } public string ThreadId { get { throw null; } } public long Timestamp { get { throw null; } } } public enum TraceEventType { Critical = 1, Error = 2, Warning = 4, Information = 8, Verbose = 16, Start = 256, Stop = 512, Suspend = 1024, Resume = 2048, Transfer = 4096, } public abstract partial class TraceFilter { protected TraceFilter() { } public abstract bool ShouldTrace(System.Diagnostics.TraceEventCache? cache, string source, System.Diagnostics.TraceEventType eventType, int id, string? formatOrMessage, object?[]? args, object? data1, object?[]? data); } public enum TraceLevel { Off = 0, Error = 1, Warning = 2, Info = 3, Verbose = 4, } public abstract partial class TraceListener : System.MarshalByRefObject, System.IDisposable { protected TraceListener() { } protected TraceListener(string? name) { } public System.Collections.Specialized.StringDictionary Attributes { get { throw null; } } public System.Diagnostics.TraceFilter? 
Filter { get { throw null; } set { } } public int IndentLevel { get { throw null; } set { } } public int IndentSize { get { throw null; } set { } } public virtual bool IsThreadSafe { get { throw null; } } [System.Diagnostics.CodeAnalysis.AllowNullAttribute] public virtual string Name { get { throw null; } set { } } protected bool NeedIndent { get { throw null; } set { } } public System.Diagnostics.TraceOptions TraceOutputOptions { get { throw null; } set { } } public virtual void Close() { } public void Dispose() { } protected virtual void Dispose(bool disposing) { } public virtual void Fail(string? message) { } public virtual void Fail(string? message, string? detailMessage) { } public virtual void Flush() { } protected virtual string[]? GetSupportedAttributes() { throw null; } public virtual void TraceData(System.Diagnostics.TraceEventCache? eventCache, string source, System.Diagnostics.TraceEventType eventType, int id, object? data) { } public virtual void TraceData(System.Diagnostics.TraceEventCache? eventCache, string source, System.Diagnostics.TraceEventType eventType, int id, params object?[]? data) { } public virtual void TraceEvent(System.Diagnostics.TraceEventCache? eventCache, string source, System.Diagnostics.TraceEventType eventType, int id) { } public virtual void TraceEvent(System.Diagnostics.TraceEventCache? eventCache, string source, System.Diagnostics.TraceEventType eventType, int id, string? message) { } public virtual void TraceEvent(System.Diagnostics.TraceEventCache? eventCache, string source, System.Diagnostics.TraceEventType eventType, int id, string? format, params object?[]? args) { } public virtual void TraceTransfer(System.Diagnostics.TraceEventCache? eventCache, string source, int id, string? message, System.Guid relatedActivityId) { } public virtual void Write(object? o) { } public virtual void Write(object? o, string? category) { } public abstract void Write(string? message); public virtual void Write(string? message, string? category) { } protected virtual void WriteIndent() { } public virtual void WriteLine(object? o) { } public virtual void WriteLine(object? o, string? category) { } public abstract void WriteLine(string? message); public virtual void WriteLine(string? message, string? category) { } } public partial class TraceListenerCollection : System.Collections.ICollection, System.Collections.IEnumerable, System.Collections.IList { internal TraceListenerCollection() { } public int Count { get { throw null; } } public System.Diagnostics.TraceListener this[int i] { get { throw null; } set { } } public System.Diagnostics.TraceListener? this[string name] { get { throw null; } } bool System.Collections.ICollection.IsSynchronized { get { throw null; } } object System.Collections.ICollection.SyncRoot { get { throw null; } } bool System.Collections.IList.IsFixedSize { get { throw null; } } bool System.Collections.IList.IsReadOnly { get { throw null; } } object? System.Collections.IList.this[int index] { get { throw null; } set { } } public int Add(System.Diagnostics.TraceListener listener) { throw null; } public void AddRange(System.Diagnostics.TraceListenerCollection value) { } public void AddRange(System.Diagnostics.TraceListener[] value) { } public void Clear() { } public bool Contains(System.Diagnostics.TraceListener? listener) { throw null; } public void CopyTo(System.Diagnostics.TraceListener[] listeners, int index) { } public System.Collections.IEnumerator GetEnumerator() { throw null; } public int IndexOf(System.Diagnostics.TraceListener? 
listener) { throw null; } public void Insert(int index, System.Diagnostics.TraceListener listener) { } public void Remove(System.Diagnostics.TraceListener? listener) { } public void Remove(string name) { } public void RemoveAt(int index) { } void System.Collections.ICollection.CopyTo(System.Array array, int index) { } int System.Collections.IList.Add(object? value) { throw null; } bool System.Collections.IList.Contains(object? value) { throw null; } int System.Collections.IList.IndexOf(object? value) { throw null; } void System.Collections.IList.Insert(int index, object? value) { } void System.Collections.IList.Remove(object? value) { } } [System.FlagsAttribute] public enum TraceOptions { None = 0, LogicalOperationStack = 1, DateTime = 2, Timestamp = 4, ProcessId = 8, ThreadId = 16, Callstack = 32, } public partial class TraceSource { public TraceSource(string name) { } public TraceSource(string name, System.Diagnostics.SourceLevels defaultLevel) { } public System.Collections.Specialized.StringDictionary Attributes { get { throw null; } } public System.Diagnostics.TraceListenerCollection Listeners { get { throw null; } } public string Name { get { throw null; } } public System.Diagnostics.SourceSwitch Switch { get { throw null; } set { } } public void Close() { } public void Flush() { } protected virtual string[]? GetSupportedAttributes() { throw null; } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceData(System.Diagnostics.TraceEventType eventType, int id, object? data) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceData(System.Diagnostics.TraceEventType eventType, int id, params object?[]? data) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceEvent(System.Diagnostics.TraceEventType eventType, int id) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceEvent(System.Diagnostics.TraceEventType eventType, int id, string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceEvent(System.Diagnostics.TraceEventType eventType, int id, string? format, params object?[]? args) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceInformation(string? message) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceInformation(string? format, params object?[]? args) { } [System.Diagnostics.ConditionalAttribute("TRACE")] public void TraceTransfer(int id, string? message, System.Guid relatedActivityId) { } } [System.Diagnostics.SwitchLevelAttribute(typeof(System.Diagnostics.TraceLevel))] public partial class TraceSwitch : System.Diagnostics.Switch { public TraceSwitch(string displayName, string? description) : base (default(string), default(string)) { } public TraceSwitch(string displayName, string? description, string defaultSwitchValue) : base (default(string), default(string)) { } public System.Diagnostics.TraceLevel Level { get { throw null; } set { } } public bool TraceError { get { throw null; } } public bool TraceInfo { get { throw null; } } public bool TraceVerbose { get { throw null; } } public bool TraceWarning { get { throw null; } } protected override void OnSwitchSettingChanged() { } protected override void OnValueChanged() { } } }
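A minimal consumption sketch of the tracing surface declared above (illustrative only, not part of the reference assembly; the listener class, program class, and message strings are made up, and only members declared above are used — the abstract TraceListener.Write/WriteLine, Trace.Listeners, Trace.AutoFlush, Trace.TraceInformation, Trace.WriteLineIf, Trace.Indent/Unindent):

```csharp
#define TRACE                     // Trace's tracing members are [Conditional("TRACE")], so define it explicitly.
#nullable enable
using System.Diagnostics;

// Hypothetical listener: derives from the abstract TraceListener above and implements its two abstract members.
sealed class PlainConsoleListener : TraceListener
{
    public override void Write(string? message) => System.Console.Write(message);
    public override void WriteLine(string? message) => System.Console.WriteLine(message);
}

static class TraceUsageSketch
{
    static void Main()
    {
        Trace.Listeners.Add(new PlainConsoleListener()); // route Trace output through the custom listener
        Trace.AutoFlush = true;

        Trace.TraceInformation("starting up");
        Trace.WriteLineIf(true, "value written", "category");

        Trace.Indent();
        Trace.WriteLine("indented detail");
        Trace.Unindent();
    }
}
```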
-1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer-loop tests that source-generate our full regex corpus caught a case where this was happening. A couple of fixes, either of which on its own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
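As a rough illustration of the "atomic" notion this description relies on (the pattern and input strings here are made up, not taken from the PR): a greedy loop can give characters back to whatever follows it (backtrack), while an atomic group `(?>...)` cannot, which is why a node known to be atomic needs no backtracking code behind it.

```csharp
using System;
using System.Text.RegularExpressions;

class AtomicSketch
{
    static void Main()
    {
        // Greedy \d+ first consumes "123", then backtracks one digit so the trailing '3' can match.
        Console.WriteLine(Regex.IsMatch("123", @"\d+3"));     // True

        // The atomic group (?>\d+) never gives digits back, so the trailing '3' can never match.
        Console.WriteLine(Regex.IsMatch("123", @"(?>\d+)3")); // False
    }
}
```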
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer-loop tests that source-generate our full regex corpus caught a case where this was happening. A couple of fixes, either of which on its own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/libraries/System.Security.Permissions/src/System/Security/Permissions/StorePermission.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Security.Permissions { #if NETCOREAPP [Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] #endif public sealed class StorePermission : CodeAccessPermission, IUnrestrictedPermission { public StorePermission(PermissionState state) { } public StorePermission(StorePermissionFlags flag) { } public StorePermissionFlags Flags { get; set; } public bool IsUnrestricted() { return false; } public override IPermission Union(IPermission target) { return null; } public override bool IsSubsetOf(IPermission target) { return false; } public override IPermission Intersect(IPermission target) { return null; } public override IPermission Copy() { return null; } public override SecurityElement ToXml() { return null; } public override void FromXml(SecurityElement securityElement) { } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Security.Permissions { #if NETCOREAPP [Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] #endif public sealed class StorePermission : CodeAccessPermission, IUnrestrictedPermission { public StorePermission(PermissionState state) { } public StorePermission(StorePermissionFlags flag) { } public StorePermissionFlags Flags { get; set; } public bool IsUnrestricted() { return false; } public override IPermission Union(IPermission target) { return null; } public override bool IsSubsetOf(IPermission target) { return false; } public override IPermission Intersect(IPermission target) { return null; } public override IPermission Copy() { return null; } public override SecurityElement ToXml() { return null; } public override void FromXml(SecurityElement securityElement) { } } }
-1
dotnet/runtime
66,195
Fix source generated regex compilation failure due to mismatched notion of atomic
During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer-loop tests that source-generate our full regex corpus caught a case where this was happening. A couple of fixes, either of which on its own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
stephentoub
2022-03-04T15:21:34Z
2022-03-04T20:45:34Z
b259ef087d3faf2e3147e2bc21369b03794eae0d
39626e59544c0c949d2439d1e94ac3fc06343c27
Fix source generated regex compilation failure due to mismatched notion of atomic. During and post-parsing, we apply various optimizations to the regex node tree, in particular trying to annotate as much as possible as atomic in order to eliminate unnecessary backtracking. Then later when RegexCompiler and the source generator view the final tree, they also compute for every node whether a child may backtrack, as doing so enables avoiding unnecessary backtracking-related code generation if the child is known to not backtrack (e.g. because it's now marked as atomic). However, things can go awry if the compiler / source generator's view of what's atomic differs from what's actually generated. Because of how optimizations are applied to the node tree, it's possible for a late optimization to make a transformation that then would enable a node to be made atomic, but we don't run that phase of the optimizer again, and thus the node is left non-atomic. Then the source generator comes along, does its analysis, and sees that the node should be treated as atomic. That leads to problems, because the node itself will have unnecessary backtracking code generated but the parent will rightly assume there wasn't anyway and won't generate the code necessary to compensate for it, or alternatively will generate code that causes problems (e.g. the source generator uses this information to determine whether it can output scopes). Our outer-loop tests that source-generate our full regex corpus caught a case where this was happening. A couple of fixes, either of which on its own is sufficient to address this particular case, but each of which also brings other benefits: 1. When rendering a single-char loop, it consults the computed atomicity table to determine whether the rest of the source generation views it as atomic. If it does, it instead does an atomic rendering. 2. When we do our ending backtracking elimination pass (i.e. walking down the right-hand side of atomic nodes to make anything that ends them also be atomic), we should also recur into lookarounds. This also removes some duplicated code for reducing lookarounds, and renames some stale method names.
./src/coreclr/pal/src/libunwind/src/s390x/Linit_remote.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Ginit_remote.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Ginit_remote.c" #endif
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
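As a rough sketch of the scalar pattern this lowering targets (the method names below are hypothetical, not from the PR): in two's complement, `x & -x` isolates the lowest set bit, which is what BMI1's `blsi` computes and what `Bmi1.ExtractLowestSetBit` already exposes. Whether the JIT actually emits `blsi` for the pattern form depends on this change and on BMI1 support at run time.

```csharp
using System;
using System.Runtime.Intrinsics.X86;

class LowestSetBitSketch
{
    // The scalar shape the lowering described above recognizes: AND(x, NEG(x)).
    static uint ViaPattern(uint x) => x & (uint)-(int)x;

    // The existing hardware intrinsic named in the description, guarded by a run-time check.
    static uint ViaIntrinsic(uint x) =>
        Bmi1.IsSupported ? Bmi1.ExtractLowestSetBit(x) : x & (uint)-(int)x;

    static void Main()
    {
        Console.WriteLine(ViaPattern(88));   // 8 (0b1011000 -> 0b0001000)
        Console.WriteLine(ViaIntrinsic(88)); // 8
    }
}
```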
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/jit/instrsxarch.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // This file was previously known as instrs.h // /***************************************************************************** * x86 instructions for the JIT compiler * * id -- the enum name for the instruction * nm -- textual name (for assembly dipslay) * um -- update mode, see IUM_xx enum (rd, wr, or rw) * mr -- base encoding for R/M[reg] addressing mode * mi -- base encoding for R/M,icon addressing mode * rm -- base encoding for reg,R/M addressing mode * a4 -- base encoding for eax,i32 addressing mode * rr -- base encoding for register addressing mode * flags -- flags, see INS_FLAGS_* enum * ******************************************************************************/ // clang-format off #if !defined(TARGET_XARCH) #error Unexpected target type #endif #ifndef INST1 #error At least INST1 must be defined before including this file. #endif /*****************************************************************************/ #ifndef INST0 #define INST0(id, nm, um, mr, flags) #endif #ifndef INST2 #define INST2(id, nm, um, mr, mi, flags) #endif #ifndef INST3 #define INST3(id, nm, um, mr, mi, rm, flags) #endif #ifndef INST4 #define INST4(id, nm, um, mr, mi, rm, a4, flags) #endif #ifndef INST5 #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) #endif /*****************************************************************************/ /* The following is x86-specific */ /*****************************************************************************/ // id nm um mr mi rm a4 rr flags INST5(invalid, "INVALID", IUM_RD, BAD_CODE, BAD_CODE, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST5(push, "push", IUM_RD, 0x0030FE, 0x000068, BAD_CODE, BAD_CODE, 0x000050, INS_FLAGS_None ) INST5(pop, "pop", IUM_WR, 0x00008E, BAD_CODE, BAD_CODE, BAD_CODE, 0x000058, INS_FLAGS_None ) // Does not affect the stack tracking in the emitter INST5(push_hide, "push", IUM_RD, 0x0030FE, 0x000068, BAD_CODE, BAD_CODE, 0x000050, INS_FLAGS_None ) INST5(pop_hide, "pop", IUM_WR, 0x00008E, BAD_CODE, BAD_CODE, BAD_CODE, 0x000058, INS_FLAGS_None ) INST5(inc, "inc", IUM_RW, 0x0000FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x000040, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | INS_FLAGS_Has_Wbit ) INST5(inc_l, "inc", IUM_RW, 0x0000FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x00C0FE, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF ) INST5(dec, "dec", IUM_RW, 0x0008FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x000048, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | INS_FLAGS_Has_Wbit ) INST5(dec_l, "dec", IUM_RW, 0x0008FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x00C8FE, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF ) // Multi-byte opcodes without modrm are represented in mixed endian fashion. // See comment around quarter way through this file for more information. 
INST5(bswap, "bswap", IUM_RW, 0x0F00C8, BAD_CODE, BAD_CODE, BAD_CODE, 0x00C80F, INS_FLAGS_None ) // id nm um mr mi rm a4 flags INST4(add, "add", IUM_RW, 0x000000, 0x000080, 0x000002, 0x000004, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(or, "or", IUM_RW, 0x000008, 0x000880, 0x00000A, 0x00000C, Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Resets_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(adc, "adc", IUM_RW, 0x000010, 0x001080, 0x000012, 0x000014, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | Reads_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(sbb, "sbb", IUM_RW, 0x000018, 0x001880, 0x00001A, 0x00001C, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | Reads_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(and, "and", IUM_RW, 0x000020, 0x002080, 0x000022, 0x000024, Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Resets_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(sub, "sub", IUM_RW, 0x000028, 0x002880, 0x00002A, 0x00002C, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(xor, "xor", IUM_RW, 0x000030, 0x003080, 0x000032, 0x000034, Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Resets_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(cmp, "cmp", IUM_RD, 0x000038, 0x003880, 0x00003A, 0x00003C, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(test, "test", IUM_RD, 0x000084, 0x0000F6, 0x000084, 0x0000A8, Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Resets_CF | INS_FLAGS_Has_Wbit ) INST4(mov, "mov", IUM_WR, 0x000088, 0x0000C6, 0x00008A, 0x0000B0, INS_FLAGS_Has_Wbit ) INST4(lea, "lea", IUM_WR, BAD_CODE, BAD_CODE, 0x00008D, BAD_CODE, INS_FLAGS_None ) // id nm um mr mi rm flags // Note that emitter has only partial support for BT. It can only emit the reg,reg form // and the registers need to be reversed to get the correct encoding. 
INST3(bt, "bt", IUM_RD, 0x0F00A3, BAD_CODE, 0x0F00A3, Undefined_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF ) INST3(bsf, "bsf", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00BC, Undefined_OF | Undefined_SF | Writes_ZF | Undefined_AF | Undefined_PF | Undefined_CF ) INST3(bsr, "bsr", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00BD, Undefined_OF | Undefined_SF | Writes_ZF | Undefined_AF | Undefined_PF | Undefined_CF ) INST3(movsx, "movsx", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00BE, INS_FLAGS_Has_Wbit ) #ifdef TARGET_AMD64 INST3(movsxd, "movsxd", IUM_WR, BAD_CODE, BAD_CODE, 0x4800000063, INS_FLAGS_Has_Wbit ) #endif INST3(movzx, "movzx", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00B6, INS_FLAGS_Has_Wbit ) INST3(cmovo, "cmovo", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0040, Reads_OF ) INST3(cmovno, "cmovno", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0041, Reads_OF ) INST3(cmovb, "cmovb", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0042, Reads_CF ) INST3(cmovae, "cmovae", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0043, Reads_CF ) INST3(cmove, "cmove", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0044, Reads_ZF ) INST3(cmovne, "cmovne", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0045, Reads_ZF ) INST3(cmovbe, "cmovbe", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0046, Reads_ZF | Reads_CF ) INST3(cmova, "cmova", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0047, Reads_ZF | Reads_CF ) INST3(cmovs, "cmovs", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0048, Reads_SF ) INST3(cmovns, "cmovns", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0049, Reads_SF ) INST3(cmovp, "cmovp", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004A, Reads_PF ) INST3(cmovnp, "cmovnp", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004B, Reads_PF ) INST3(cmovl, "cmovl", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004C, Reads_OF | Reads_SF ) INST3(cmovge, "cmovge", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004D, Reads_OF | Reads_SF ) INST3(cmovle, "cmovle", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004E, Reads_OF | Reads_SF | Reads_ZF ) INST3(cmovg, "cmovg", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004F, Reads_OF | Reads_SF | Reads_ZF ) INST3(xchg, "xchg", IUM_RW, 0x000086, BAD_CODE, 0x000086, INS_FLAGS_Has_Wbit ) INST3(imul, "imul", IUM_RW, 0x0F00AC, BAD_CODE, 0x0F00AF, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) // id nm um mr mi rm flags // Instead of encoding these as 3-operand instructions, we encode them // as 2-operand instructions with the target register being implicit // implicit_reg = op1*op2_icon #define INSTMUL INST3 INSTMUL(imul_AX, "imul", IUM_RD, BAD_CODE, 0x000068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_CX, "imul", IUM_RD, BAD_CODE, 0x000868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_DX, "imul", IUM_RD, BAD_CODE, 0x001068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_BX, "imul", IUM_RD, BAD_CODE, 0x001868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_SP, "imul", IUM_RD, BAD_CODE, BAD_CODE, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_BP, "imul", IUM_RD, BAD_CODE, 0x002868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_SI, "imul", IUM_RD, BAD_CODE, 0x003068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | 
Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_DI, "imul", IUM_RD, BAD_CODE, 0x003868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) #ifdef TARGET_AMD64 INSTMUL(imul_08, "imul", IUM_RD, BAD_CODE, 0x4400000068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_09, "imul", IUM_RD, BAD_CODE, 0x4400000868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_10, "imul", IUM_RD, BAD_CODE, 0x4400001068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_11, "imul", IUM_RD, BAD_CODE, 0x4400001868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_12, "imul", IUM_RD, BAD_CODE, 0x4400002068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_13, "imul", IUM_RD, BAD_CODE, 0x4400002868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_14, "imul", IUM_RD, BAD_CODE, 0x4400003068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_15, "imul", IUM_RD, BAD_CODE, 0x4400003868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) #endif // TARGET_AMD64 // the hex codes in this file represent the instruction encoding as follows: // 0x0000ff00 - modrm byte position // 0x000000ff - last byte of opcode (before modrm) // 0x00ff0000 - first byte of opcode // 0xff000000 - middle byte of opcode, if needed (after first, before last) // // So a 1-byte opcode is: and with modrm: // 0x00000011 0x0000RM11 // // So a 2-byte opcode is: and with modrm: // 0x00002211 0x0011RM22 // // So a 3-byte opcode is: and with modrm: // 0x00113322 0x2211RM33 // // So a 4-byte opcode would be something like this: // 0x22114433 #define PACK3(byte1,byte2,byte3) (((byte1) << 16) | ((byte2) << 24) | (byte3)) #define PACK2(byte1,byte2) (((byte1) << 16) | (byte2)) #define SSEFLT(c) PACK3(0xf3, 0x0f, c) #define SSEDBL(c) PACK3(0xf2, 0x0f, c) #define PCKDBL(c) PACK3(0x66, 0x0f, c) #define PCKFLT(c) PACK2(0x0f,c) // These macros encode extra byte that is implicit in the macro. #define PACK4(byte1,byte2,byte3,byte4) (((byte1) << 16) | ((byte2) << 24) | (byte3) | ((byte4) << 8)) #define SSE38(c) PACK4(0x66, 0x0f, 0x38, c) #define SSE3A(c) PACK4(0x66, 0x0f, 0x3A, c) // VEX* encodes the implied leading opcode bytes in c1: // 1: implied 0f, 2: implied 0f 38, 3: implied 0f 3a #define VEX2INT(c1,c2) PACK3(c1, 0xc5, c2) #define VEX3INT(c1,c2) PACK4(c1, 0xc5, 0x02, c2) #define VEX3FLT(c1,c2) PACK4(c1, 0xc5, 0x02, c2) INST3(FIRST_SSE_INSTRUCTION, "FIRST_SSE_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) // These are the SSE instructions used on x86 INST3(pmovmskb, "pmovmskb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xD7), INS_FLAGS_None) // Move the MSB bits of all bytes in a xmm reg to an int reg INST3(movmskpd, "movmskpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x50), INS_FLAGS_None) // Extract 2-bit sign mask from xmm and store in reg. The upper bits of r32 or r64 are filled with zeros. 
INST3(movd, "movd", IUM_WR, PCKDBL(0x7E), BAD_CODE, PCKDBL(0x6E), INS_FLAGS_None) // Move Double/Quadword between mm regs <-> memory/r32/r64 regs, cleanup https://github.com/dotnet/runtime/issues/47943 INST3(movq, "movq", IUM_WR, PCKDBL(0xD6), BAD_CODE, SSEFLT(0x7E), INS_FLAGS_None) // Move Quadword between memory/mm <-> regs, cleanup https://github.com/dotnet/runtime/issues/47943 INST3(movsdsse2, "movsd", IUM_WR, SSEDBL(0x11), BAD_CODE, SSEDBL(0x10), INS_Flags_IsDstSrcSrcAVXInstruction) INST3(punpckldq, "punpckldq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x62), INS_Flags_IsDstDstSrcAVXInstruction) INST3(xorps, "xorps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x57), INS_Flags_IsDstDstSrcAVXInstruction) // XOR packed singles INST3(cvttsd2si, "cvttsd2si", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x2C), INS_FLAGS_None) // cvt with trunc scalar double to signed DWORDs INST3(movntdq, "movntdq", IUM_WR, PCKDBL(0xE7), BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(movnti, "movnti", IUM_WR, PCKFLT(0xC3), BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(movntpd, "movntpd", IUM_WR, PCKDBL(0x2B), BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(movntps, "movntps", IUM_WR, PCKFLT(0x2B), BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(movdqu, "movdqu", IUM_WR, SSEFLT(0x7F), BAD_CODE, SSEFLT(0x6F), INS_FLAGS_None) INST3(movdqa, "movdqa", IUM_WR, PCKDBL(0x7F), BAD_CODE, PCKDBL(0x6F), INS_FLAGS_None) INST3(movlpd, "movlpd", IUM_WR, PCKDBL(0x13), BAD_CODE, PCKDBL(0x12), INS_Flags_IsDstSrcSrcAVXInstruction) INST3(movlps, "movlps", IUM_WR, PCKFLT(0x13), BAD_CODE, PCKFLT(0x12), INS_Flags_IsDstSrcSrcAVXInstruction) INST3(movhpd, "movhpd", IUM_WR, PCKDBL(0x17), BAD_CODE, PCKDBL(0x16), INS_Flags_IsDstSrcSrcAVXInstruction) INST3(movhps, "movhps", IUM_WR, PCKFLT(0x17), BAD_CODE, PCKFLT(0x16), INS_Flags_IsDstSrcSrcAVXInstruction) INST3(movss, "movss", IUM_WR, SSEFLT(0x11), BAD_CODE, SSEFLT(0x10), INS_Flags_IsDstSrcSrcAVXInstruction) INST3(movapd, "movapd", IUM_WR, PCKDBL(0x29), BAD_CODE, PCKDBL(0x28), INS_FLAGS_None) INST3(movaps, "movaps", IUM_WR, PCKFLT(0x29), BAD_CODE, PCKFLT(0x28), INS_FLAGS_None) INST3(movupd, "movupd", IUM_WR, PCKDBL(0x11), BAD_CODE, PCKDBL(0x10), INS_FLAGS_None) INST3(movups, "movups", IUM_WR, PCKFLT(0x11), BAD_CODE, PCKFLT(0x10), INS_FLAGS_None) INST3(movhlps, "movhlps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x12), INS_Flags_IsDstDstSrcAVXInstruction) INST3(movlhps, "movlhps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x16), INS_Flags_IsDstDstSrcAVXInstruction) INST3(movmskps, "movmskps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x50), INS_FLAGS_None) INST3(unpckhps, "unpckhps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x15), INS_Flags_IsDstDstSrcAVXInstruction) INST3(unpcklps, "unpcklps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x14), INS_Flags_IsDstDstSrcAVXInstruction) INST3(maskmovdqu, "maskmovdqu", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xF7), INS_FLAGS_None) INST3(shufps, "shufps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0xC6), INS_Flags_IsDstDstSrcAVXInstruction) INST3(shufpd, "shufpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xC6), INS_Flags_IsDstDstSrcAVXInstruction) INST3(punpckhdq, "punpckhdq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x6A), INS_Flags_IsDstDstSrcAVXInstruction) INST3(lfence, "lfence", IUM_RD, 0x000FE8AE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(mfence, "mfence", IUM_RD, 0x000FF0AE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(prefetchnta, "prefetchnta", IUM_RD, 0x000F0018, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(prefetcht0, "prefetcht0", IUM_RD, 0x000F0818, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(prefetcht1, "prefetcht1", IUM_RD, 0x000F1018, 
BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(prefetcht2, "prefetcht2", IUM_RD, 0x000F1818, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(sfence, "sfence", IUM_RD, 0x000FF8AE, BAD_CODE, BAD_CODE, INS_FLAGS_None) // SSE 2 arith INST3(addps, "addps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x58), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed singles INST3(addss, "addss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x58), INS_Flags_IsDstDstSrcAVXInstruction) // Add scalar singles INST3(addpd, "addpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x58), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed doubles INST3(addsd, "addsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x58), INS_Flags_IsDstDstSrcAVXInstruction) // Add scalar doubles INST3(mulps, "mulps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x59), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply packed singles INST3(mulss, "mulss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x59), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply scalar single INST3(mulpd, "mulpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x59), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply packed doubles INST3(mulsd, "mulsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x59), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply scalar doubles INST3(subps, "subps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x5C), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed singles INST3(subss, "subss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x5C), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract scalar singles INST3(subpd, "subpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x5C), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed doubles INST3(subsd, "subsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x5C), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract scalar doubles INST3(minps, "minps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x5D), INS_Flags_IsDstDstSrcAVXInstruction) // Return Minimum packed singles INST3(minss, "minss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x5D), INS_Flags_IsDstDstSrcAVXInstruction) // Return Minimum scalar single INST3(minpd, "minpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x5D), INS_Flags_IsDstDstSrcAVXInstruction) // Return Minimum packed doubles INST3(minsd, "minsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x5D), INS_Flags_IsDstDstSrcAVXInstruction) // Return Minimum scalar double INST3(divps, "divps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x5E), INS_Flags_IsDstDstSrcAVXInstruction) // Divide packed singles INST3(divss, "divss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x5E), INS_Flags_IsDstDstSrcAVXInstruction) // Divide scalar singles INST3(divpd, "divpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x5E), INS_Flags_IsDstDstSrcAVXInstruction) // Divide packed doubles INST3(divsd, "divsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x5E), INS_Flags_IsDstDstSrcAVXInstruction) // Divide scalar doubles INST3(maxps, "maxps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x5F), INS_Flags_IsDstDstSrcAVXInstruction) // Return Maximum packed singles INST3(maxss, "maxss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x5F), INS_Flags_IsDstDstSrcAVXInstruction) // Return Maximum scalar single INST3(maxpd, "maxpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x5F), INS_Flags_IsDstDstSrcAVXInstruction) // Return Maximum packed doubles INST3(maxsd, "maxsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x5F), INS_Flags_IsDstDstSrcAVXInstruction) // Return Maximum scalar double INST3(xorpd, "xorpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x57), INS_Flags_IsDstDstSrcAVXInstruction) // XOR packed doubles INST3(andps, "andps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x54), INS_Flags_IsDstDstSrcAVXInstruction) // AND packed singles 
INST3(andpd, "andpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x54), INS_Flags_IsDstDstSrcAVXInstruction) // AND packed doubles INST3(sqrtps, "sqrtps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x51), INS_FLAGS_None) // Sqrt of packed singles INST3(sqrtss, "sqrtss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x51), INS_Flags_IsDstSrcSrcAVXInstruction) // Sqrt of scalar single INST3(sqrtpd, "sqrtpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x51), INS_FLAGS_None) // Sqrt of packed doubles INST3(sqrtsd, "sqrtsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x51), INS_Flags_IsDstSrcSrcAVXInstruction) // Sqrt of scalar double INST3(andnps, "andnps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x55), INS_Flags_IsDstDstSrcAVXInstruction) // And-Not packed singles INST3(andnpd, "andnpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x55), INS_Flags_IsDstDstSrcAVXInstruction) // And-Not packed doubles INST3(orps, "orps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x56), INS_Flags_IsDstDstSrcAVXInstruction) // Or packed singles INST3(orpd, "orpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x56), INS_Flags_IsDstDstSrcAVXInstruction) // Or packed doubles INST3(haddpd, "haddpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x7C), INS_Flags_IsDstDstSrcAVXInstruction) // Horizontal add packed doubles INST3(haddps, "haddps", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x7C), INS_Flags_IsDstDstSrcAVXInstruction) // Horizontal add packed floats INST3(hsubpd, "hsubpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x7D), INS_Flags_IsDstDstSrcAVXInstruction) // Horizontal subtract packed doubles INST3(hsubps, "hsubps", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x7D), INS_Flags_IsDstDstSrcAVXInstruction) // Horizontal subtract packed floats INST3(addsubps, "addsubps", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0xD0), INS_Flags_IsDstDstSrcAVXInstruction) // Add/Subtract packed singles INST3(addsubpd, "addsubpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xD0), INS_Flags_IsDstDstSrcAVXInstruction) // Add/Subtract packed doubles // SSE 2 approx arith INST3(rcpps, "rcpps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x53), INS_FLAGS_None) // Reciprocal of packed singles INST3(rcpss, "rcpss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x53), INS_Flags_IsDstSrcSrcAVXInstruction) // Reciprocal of scalar single INST3(rsqrtps, "rsqrtps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x52), INS_FLAGS_None) // Reciprocal Sqrt of packed singles INST3(rsqrtss, "rsqrtss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x52), INS_Flags_IsDstSrcSrcAVXInstruction) // Reciprocal Sqrt of scalar single // SSE2 conversions INST3(cvtpi2ps, "cvtpi2ps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x2A), INS_FLAGS_None) // cvt packed DWORDs to singles INST3(cvtsi2ss, "cvtsi2ss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x2A), INS_Flags_IsDstDstSrcAVXInstruction) // cvt DWORD to scalar single INST3(cvtpi2pd, "cvtpi2pd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x2A), INS_FLAGS_None) // cvt packed DWORDs to doubles INST3(cvtsi2sd, "cvtsi2sd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x2A), INS_Flags_IsDstDstSrcAVXInstruction) // cvt DWORD to scalar double INST3(cvttps2pi, "cvttps2pi", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x2C), INS_FLAGS_None) // cvt with trunc packed singles to DWORDs INST3(cvttss2si, "cvttss2si", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x2C), INS_FLAGS_None) // cvt with trunc scalar single to DWORD INST3(cvttpd2pi, "cvttpd2pi", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x2C), INS_FLAGS_None) // cvt with trunc packed doubles to DWORDs INST3(cvtps2pi, "cvtps2pi", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x2D), INS_FLAGS_None) // cvt packed singles to DWORDs INST3(cvtss2si, "cvtss2si", IUM_WR, BAD_CODE, BAD_CODE, 
SSEFLT(0x2D), INS_FLAGS_None) // cvt scalar single to DWORD INST3(cvtpd2pi, "cvtpd2pi", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x2D), INS_FLAGS_None) // cvt packed doubles to DWORDs INST3(cvtsd2si, "cvtsd2si", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x2D), INS_FLAGS_None) // cvt scalar double to DWORD INST3(cvtps2pd, "cvtps2pd", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x5A), INS_FLAGS_None) // cvt packed singles to doubles INST3(cvtpd2ps, "cvtpd2ps", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x5A), INS_FLAGS_None) // cvt packed doubles to singles INST3(cvtss2sd, "cvtss2sd", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x5A), INS_Flags_IsDstDstSrcAVXInstruction) // cvt scalar single to scalar doubles INST3(cvtsd2ss, "cvtsd2ss", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x5A), INS_Flags_IsDstDstSrcAVXInstruction) // cvt scalar double to scalar singles INST3(cvtdq2ps, "cvtdq2ps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x5B), INS_FLAGS_None) // cvt packed DWORDs to singles INST3(cvtps2dq, "cvtps2dq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x5B), INS_FLAGS_None) // cvt packed singles to DWORDs INST3(cvttps2dq, "cvttps2dq", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x5B), INS_FLAGS_None) // cvt with trunc packed singles to DWORDs INST3(cvtpd2dq, "cvtpd2dq", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0xE6), INS_FLAGS_None) // cvt packed doubles to DWORDs INST3(cvttpd2dq, "cvttpd2dq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE6), INS_FLAGS_None) // cvt with trunc packed doubles to DWORDs INST3(cvtdq2pd, "cvtdq2pd", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xE6), INS_FLAGS_None) // cvt packed DWORDs to doubles // SSE2 comparison instructions INST3(comiss, "comiss", IUM_RD, BAD_CODE, BAD_CODE, PCKFLT(0x2F), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Writes_PF | Writes_CF ) // ordered compare singles INST3(comisd, "comisd", IUM_RD, BAD_CODE, BAD_CODE, PCKDBL(0x2F), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Writes_PF | Writes_CF ) // ordered compare doubles INST3(ucomiss, "ucomiss", IUM_RD, BAD_CODE, BAD_CODE, PCKFLT(0x2E), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Writes_PF | Writes_CF ) // unordered compare singles INST3(ucomisd, "ucomisd", IUM_RD, BAD_CODE, BAD_CODE, PCKDBL(0x2E), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Writes_PF | Writes_CF ) // unordered compare doubles // SSE2 packed single/double comparison operations. // Note that these instructions not only compare but also overwrite the first source. 
INST3(cmpps, "cmpps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0xC2), INS_Flags_IsDstDstSrcAVXInstruction) // compare packed singles INST3(cmppd, "cmppd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xC2), INS_Flags_IsDstDstSrcAVXInstruction) // compare packed doubles INST3(cmpss, "cmpss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xC2), INS_Flags_IsDstDstSrcAVXInstruction) // compare scalar singles INST3(cmpsd, "cmpsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0xC2), INS_Flags_IsDstDstSrcAVXInstruction) // compare scalar doubles //SSE2 packed integer operations INST3(paddb, "paddb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xFC), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed byte integers INST3(paddw, "paddw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xFD), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed word (16-bit) integers INST3(paddd, "paddd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xFE), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed double-word (32-bit) integers INST3(paddq, "paddq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xD4), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed quad-word (64-bit) integers INST3(paddsb, "paddsb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xEC), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed signed byte integers and saturate the results INST3(paddsw, "paddsw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xED), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed signed word integers and saturate the results INST3(paddusb, "paddusb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xDC), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed unsigned byte integers and saturate the results INST3(paddusw, "paddusw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xDD), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed unsigned word integers and saturate the results INST3(pavgb, "pavgb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE0), INS_Flags_IsDstDstSrcAVXInstruction) // Average of packed byte integers INST3(pavgw, "pavgw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE3), INS_Flags_IsDstDstSrcAVXInstruction) // Average of packed word integers INST3(psubb, "psubb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xF8), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed word (16-bit) integers INST3(psubw, "psubw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xF9), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed word (16-bit) integers INST3(psubd, "psubd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xFA), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed double-word (32-bit) integers INST3(psubq, "psubq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xFB), INS_Flags_IsDstDstSrcAVXInstruction) // subtract packed quad-word (64-bit) integers INST3(pmaddwd, "pmaddwd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xF5), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. 
Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in dst INST3(pmulhw, "pmulhw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE5), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply high the packed 16-bit signed integers INST3(pmulhuw, "pmulhuw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE4), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply high the packed 16-bit unsigned integers INST3(pmuludq, "pmuludq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xF4), INS_Flags_IsDstDstSrcAVXInstruction) // packed multiply 32-bit unsigned integers and store 64-bit result INST3(pmullw, "pmullw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xD5), INS_Flags_IsDstDstSrcAVXInstruction) // Packed multiply 16 bit unsigned integers and store lower 16 bits of each result INST3(pand, "pand", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xDB), INS_Flags_IsDstDstSrcAVXInstruction) // Packed bit-wise AND of two xmm regs INST3(pandn, "pandn", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xDF), INS_Flags_IsDstDstSrcAVXInstruction) // Packed bit-wise AND NOT of two xmm regs INST3(por, "por", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xEB), INS_Flags_IsDstDstSrcAVXInstruction) // Packed bit-wise OR of two xmm regs INST3(pxor, "pxor", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xEF), INS_Flags_IsDstDstSrcAVXInstruction) // Packed bit-wise XOR of two xmm regs INST3(psadbw, "psadbw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xF6), INS_Flags_IsDstDstSrcAVXInstruction) // Compute the sum of absolute differences of packed unsigned 8-bit integers INST3(psubsb, "psubsb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE8), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed 8-bit integers in b from packed 8-bit integers in a using saturation INST3(psubusb, "psubusb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xD8), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation INST3(psubsw, "psubsw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE9), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed 16-bit integers in b from packed 16-bit integers in a using saturation INST3(psubusw, "psubusw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xD9), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation // Note that the shift immediates share the same encoding between left and right-shift, and are distinguished by the Reg/Opcode, // which is handled in emitxarch.cpp. 
INST3(psrldq, "psrldq", IUM_WR, BAD_CODE, PCKDBL(0x73), BAD_CODE, INS_Flags_IsDstDstSrcAVXInstruction) // Shift right logical of xmm reg by given number of bytes INST3(pslldq, "pslldq", IUM_WR, BAD_CODE, PCKDBL(0x73), BAD_CODE, INS_Flags_IsDstDstSrcAVXInstruction) // Shift left logical of xmm reg by given number of bytes INST3(psllw, "psllw", IUM_WR, BAD_CODE, PCKDBL(0x71), PCKDBL(0xF1), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift left logical of 16-bit integers INST3(pslld, "pslld", IUM_WR, BAD_CODE, PCKDBL(0x72), PCKDBL(0xF2), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift left logical of 32-bit integers INST3(psllq, "psllq", IUM_WR, BAD_CODE, PCKDBL(0x73), PCKDBL(0xF3), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift left logical of 64-bit integers INST3(psrlw, "psrlw", IUM_WR, BAD_CODE, PCKDBL(0x71), PCKDBL(0xD1), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift right logical of 16-bit integers INST3(psrld, "psrld", IUM_WR, BAD_CODE, PCKDBL(0x72), PCKDBL(0xD2), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift right logical of 32-bit integers INST3(psrlq, "psrlq", IUM_WR, BAD_CODE, PCKDBL(0x73), PCKDBL(0xD3), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift right logical of 64-bit integers INST3(psraw, "psraw", IUM_WR, BAD_CODE, PCKDBL(0x71), PCKDBL(0xE1), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift right arithmetic of 16-bit integers INST3(psrad, "psrad", IUM_WR, BAD_CODE, PCKDBL(0x72), PCKDBL(0xE2), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift right arithmetic of 32-bit integers INST3(pmaxub, "pmaxub", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xDE), INS_Flags_IsDstDstSrcAVXInstruction) // packed maximum unsigned bytes INST3(pminub, "pminub", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xDA), INS_Flags_IsDstDstSrcAVXInstruction) // packed minimum unsigned bytes INST3(pmaxsw, "pmaxsw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xEE), INS_Flags_IsDstDstSrcAVXInstruction) // packed maximum signed words INST3(pminsw, "pminsw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xEA), INS_Flags_IsDstDstSrcAVXInstruction) // packed minimum signed words INST3(pcmpeqd, "pcmpeqd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x76), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 32-bit integers for equality INST3(pcmpgtd, "pcmpgtd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x66), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 32-bit signed integers for greater than INST3(pcmpeqw, "pcmpeqw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x75), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 16-bit integers for equality INST3(pcmpgtw, "pcmpgtw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x65), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 16-bit signed integers for greater than INST3(pcmpeqb, "pcmpeqb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x74), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 8-bit integers for equality INST3(pcmpgtb, "pcmpgtb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x64), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 8-bit signed integers for greater than INST3(pshufd, "pshufd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x70), INS_FLAGS_None) // Packed shuffle of 32-bit integers INST3(pshufhw, "pshufhw", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x70), INS_FLAGS_None) // Shuffle the high words in xmm2/m128 based on the encoding in imm8 and store the result in xmm1. INST3(pshuflw, "pshuflw", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x70), INS_FLAGS_None) // Shuffle the low words in xmm2/m128 based on the encoding in imm8 and store the result in xmm1. 
INST3(pextrw,      "pextrw",      IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xC5), INS_FLAGS_None)                      // Extract 16-bit value into a r32 with zero extended to 32-bits
INST3(pinsrw,      "pinsrw",      IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xC4), INS_Flags_IsDstDstSrcAVXInstruction) // Insert word at index
INST3(punpckhbw,   "punpckhbw",   IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x68), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen ubyte to ushort (hi)
INST3(punpcklbw,   "punpcklbw",   IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x60), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen ubyte to ushort (lo)
INST3(punpckhqdq,  "punpckhqdq",  IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x6D), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen uint to ulong (hi)
INST3(punpcklqdq,  "punpcklqdq",  IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x6C), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen uint to ulong (lo)
INST3(punpckhwd,   "punpckhwd",   IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x69), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen ushort to uint (hi)
INST3(punpcklwd,   "punpcklwd",   IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x61), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen ushort to uint (lo)
INST3(unpckhpd,    "unpckhpd",    IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x15), INS_Flags_IsDstDstSrcAVXInstruction) // Unpack and interleave the high packed doubles
INST3(unpcklpd,    "unpcklpd",    IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x14), INS_Flags_IsDstDstSrcAVXInstruction) // Unpack and interleave the low packed doubles
INST3(packssdw,    "packssdw",    IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x6B), INS_Flags_IsDstDstSrcAVXInstruction) // Pack (narrow) int to short with saturation
INST3(packsswb,    "packsswb",    IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x63), INS_Flags_IsDstDstSrcAVXInstruction) // Pack (narrow) short to byte with saturation
INST3(packuswb,    "packuswb",    IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x67), INS_Flags_IsDstDstSrcAVXInstruction) // Pack (narrow) short to unsigned byte with saturation

//    id          nm           um      mr         mi         rm            flags
INST3(dpps,        "dpps",        IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x40),  INS_Flags_IsDstDstSrcAVXInstruction) // Packed dot product of two float vector regs
INST3(dppd,        "dppd",        IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x41),  INS_Flags_IsDstDstSrcAVXInstruction) // Packed dot product of two double vector regs
INST3(insertps,    "insertps",    IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x21),  INS_Flags_IsDstDstSrcAVXInstruction) // Insert packed single precision float value
INST3(pcmpeqq,     "pcmpeqq",     IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x29),  INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 64-bit integers for equality
INST3(pcmpgtq,     "pcmpgtq",     IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x37),  INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 64-bit signed integers for greater than
INST3(pmulld,      "pmulld",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x40),  INS_Flags_IsDstDstSrcAVXInstruction) // Packed multiply 32 bit unsigned integers and store lower 32 bits of each result
INST3(ptest,       "ptest",       IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x17),  INS_FLAGS_None)                      // Packed logical compare
INST3(phaddd,      "phaddd",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x02),  INS_Flags_IsDstDstSrcAVXInstruction) // Packed horizontal add
INST3(pabsb,       "pabsb",       IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x1C),  INS_FLAGS_None)                      // Packed absolute value of bytes
INST3(pabsw,       "pabsw",       IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x1D),  INS_FLAGS_None)                      // Packed absolute value of 16-bit integers
INST3(pabsd,       "pabsd",       IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x1E),  INS_FLAGS_None)                      // Packed absolute value of 32-bit integers
INST3(palignr,     "palignr",     IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x0F),  INS_Flags_IsDstDstSrcAVXInstruction) // Packed Align Right
INST3(pmaddubsw,   "pmaddubsw",   IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x04),  INS_Flags_IsDstDstSrcAVXInstruction) // Multiply and Add Packed Signed and Unsigned Bytes
INST3(pmulhrsw,    "pmulhrsw",    IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x0B),  INS_Flags_IsDstDstSrcAVXInstruction) // Packed Multiply High with Round and Scale
INST3(pshufb,      "pshufb",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x00),  INS_Flags_IsDstDstSrcAVXInstruction) // Packed Shuffle Bytes
INST3(psignb,      "psignb",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x08),  INS_Flags_IsDstDstSrcAVXInstruction) // Packed SIGN
INST3(psignw,      "psignw",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x09),  INS_Flags_IsDstDstSrcAVXInstruction) // Packed SIGN
INST3(psignd,      "psignd",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x0A),  INS_Flags_IsDstDstSrcAVXInstruction) // Packed SIGN
INST3(pminsb,      "pminsb",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x38),  INS_Flags_IsDstDstSrcAVXInstruction) // packed minimum signed bytes
INST3(pminsd,      "pminsd",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x39),  INS_Flags_IsDstDstSrcAVXInstruction) // packed minimum 32-bit signed integers
INST3(pminuw,      "pminuw",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x3A),  INS_Flags_IsDstDstSrcAVXInstruction) // packed minimum 16-bit unsigned integers
INST3(pminud,      "pminud",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x3B),  INS_Flags_IsDstDstSrcAVXInstruction) // packed minimum 32-bit unsigned integers
INST3(pmaxsb,      "pmaxsb",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x3C),  INS_Flags_IsDstDstSrcAVXInstruction) // packed maximum signed bytes
INST3(pmaxsd,      "pmaxsd",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x3D),  INS_Flags_IsDstDstSrcAVXInstruction) // packed maximum 32-bit signed integers
INST3(pmaxuw,      "pmaxuw",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x3E),  INS_Flags_IsDstDstSrcAVXInstruction) // packed maximum 16-bit unsigned integers
INST3(pmaxud,      "pmaxud",      IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x3F),  INS_Flags_IsDstDstSrcAVXInstruction) // packed maximum 32-bit unsigned integers
INST3(pmovsxbw,    "pmovsxbw",    IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x20),  INS_FLAGS_None)                      // Packed sign extend byte to short
INST3(pmovsxbd,    "pmovsxbd",    IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x21),  INS_FLAGS_None)                      // Packed sign extend byte to int
INST3(pmovsxbq,    "pmovsxbq",    IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x22),  INS_FLAGS_None)                      // Packed sign extend byte to long
INST3(pmovsxwd,    "pmovsxwd",    IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x23),  INS_FLAGS_None)                      // Packed sign extend short to int
INST3(pmovsxwq,    "pmovsxwq",    IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x24),  INS_FLAGS_None)                      // Packed sign extend short to long
INST3(pmovsxdq,    "pmovsxdq",    IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x25),  INS_FLAGS_None)                      // Packed sign extend int to long
INST3(pmovzxbw,    "pmovzxbw",    IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x30),  INS_FLAGS_None)                      // Packed zero extend byte to short
INST3(pmovzxbd,    "pmovzxbd",    IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x31),  INS_FLAGS_None)                      // Packed zero extend byte to int
INST3(pmovzxbq,    "pmovzxbq",    IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x32),  INS_FLAGS_None)                      // Packed zero extend byte to long
INST3(pmovzxwd,    "pmovzxwd",    IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x33),  INS_FLAGS_None)                      // Packed zero extend short to int
INST3(pmovzxwq,    "pmovzxwq",    IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x34),  INS_FLAGS_None)                      // Packed zero extend short to long
INST3(pmovzxdq,    "pmovzxdq",    IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x35),  INS_FLAGS_None)                      // Packed zero extend int to long
INST3(packusdw, "packusdw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x2B), INS_Flags_IsDstDstSrcAVXInstruction) // Pack (narrow) int to unsigned short with saturation INST3(roundps, "roundps", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x08), INS_FLAGS_None) // Round packed single precision floating-point values INST3(roundss, "roundss", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x0A), INS_Flags_IsDstSrcSrcAVXInstruction) // Round scalar single precision floating-point values INST3(roundpd, "roundpd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x09), INS_FLAGS_None) // Round packed double precision floating-point values INST3(roundsd, "roundsd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x0B), INS_Flags_IsDstSrcSrcAVXInstruction) // Round scalar double precision floating-point values INST3(pmuldq, "pmuldq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x28), INS_Flags_IsDstDstSrcAVXInstruction) // packed multiply 32-bit signed integers and store 64-bit result INST3(blendps, "blendps", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x0C), INS_Flags_IsDstDstSrcAVXInstruction) // Blend Packed Single Precision Floating-Point Values INST3(blendvps, "blendvps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x14), INS_FLAGS_None) // Variable Blend Packed Singles INST3(blendpd, "blendpd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x0D), INS_Flags_IsDstDstSrcAVXInstruction) // Blend Packed Double Precision Floating-Point Values INST3(blendvpd, "blendvpd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x15), INS_FLAGS_None) // Variable Blend Packed Doubles INST3(pblendw, "pblendw", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x0E), INS_Flags_IsDstDstSrcAVXInstruction) // Blend Packed Words INST3(pblendvb, "pblendvb", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x10), INS_FLAGS_None) // Variable Blend Packed Bytes INST3(phaddw, "phaddw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x01), INS_Flags_IsDstDstSrcAVXInstruction) // Packed horizontal add of 16-bit integers INST3(phsubw, "phsubw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x05), INS_Flags_IsDstDstSrcAVXInstruction) // Packed horizontal subtract of 16-bit integers INST3(phsubd, "phsubd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x06), INS_Flags_IsDstDstSrcAVXInstruction) // Packed horizontal subtract of 32-bit integers INST3(phaddsw, "phaddsw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x03), INS_Flags_IsDstDstSrcAVXInstruction) // Packed horizontal add of 16-bit integers with saturation INST3(phsubsw, "phsubsw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x07), INS_Flags_IsDstDstSrcAVXInstruction) // Packed horizontal subtract of 16-bit integers with saturation INST3(lddqu, "lddqu", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0xF0), INS_FLAGS_None) // Load Unaligned integer INST3(movntdqa, "movntdqa", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x2A), INS_FLAGS_None) // Load Double Quadword Non-Temporal Aligned Hint INST3(movddup, "movddup", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x12), INS_FLAGS_None) // Replicate Double FP Values INST3(movsldup, "movsldup", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x12), INS_FLAGS_None) // Replicate even-indexed Single FP Values INST3(movshdup, "movshdup", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x16), INS_FLAGS_None) // Replicate odd-indexed Single FP Values INST3(phminposuw, "phminposuw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x41), INS_FLAGS_None) // Packed Horizontal Word Minimum INST3(mpsadbw, "mpsadbw", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x42), INS_Flags_IsDstDstSrcAVXInstruction) // Compute Multiple Packed Sums of Absolute Difference INST3(pinsrb, "pinsrb", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x20), INS_Flags_IsDstDstSrcAVXInstruction) // Insert Byte INST3(pinsrd, "pinsrd", IUM_WR, BAD_CODE, 
BAD_CODE, SSE3A(0x22), INS_Flags_IsDstDstSrcAVXInstruction) // Insert Dword INST3(pinsrq, "pinsrq", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x22), INS_Flags_IsDstDstSrcAVXInstruction) // Insert Qword INST3(pextrb, "pextrb", IUM_WR, SSE3A(0x14), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract Byte INST3(pextrd, "pextrd", IUM_WR, SSE3A(0x16), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract Dword INST3(pextrq, "pextrq", IUM_WR, SSE3A(0x16), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract Qword INST3(pextrw_sse41, "pextrw", IUM_WR, SSE3A(0x15), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract Word INST3(extractps, "extractps", IUM_WR, SSE3A(0x17), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract Packed Floating-Point Values //PCLMULQDQ instructions INST3(pclmulqdq, "pclmulqdq" , IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x44), INS_Flags_IsDstDstSrcAVXInstruction) // Perform a carry-less multiplication of two quadwords //AES instructions INST3(aesdec, "aesdec", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xDE), INS_Flags_IsDstDstSrcAVXInstruction) // Perform one round of an AES decryption flow INST3(aesdeclast, "aesdeclast", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xDF), INS_Flags_IsDstDstSrcAVXInstruction) // Perform last round of an AES decryption flow INST3(aesenc, "aesenc", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xDC), INS_Flags_IsDstDstSrcAVXInstruction) // Perform one round of an AES encryption flow INST3(aesenclast, "aesenclast", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xDD), INS_Flags_IsDstDstSrcAVXInstruction) // Perform last round of an AES encryption flow INST3(aesimc, "aesimc", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xDB), INS_FLAGS_None) // Perform the AES InvMixColumn Transformation INST3(aeskeygenassist, "aeskeygenassist", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0xDF), INS_FLAGS_None) // AES Round Key Generation Assist INST3(LAST_SSE_INSTRUCTION, "LAST_SSE_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(FIRST_AVX_INSTRUCTION, "FIRST_AVX_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) // AVX only instructions INST3(vbroadcastss, "broadcastss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x18), INS_FLAGS_None) // Broadcast float value read from memory to entire ymm register INST3(vbroadcastsd, "broadcastsd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x19), INS_FLAGS_None) // Broadcast float value read from memory to entire ymm register INST3(vpbroadcastb, "pbroadcastb", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x78), INS_FLAGS_None) // Broadcast int8 value from reg/memory to entire ymm register INST3(vpbroadcastw, "pbroadcastw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x79), INS_FLAGS_None) // Broadcast int16 value from reg/memory to entire ymm register INST3(vpbroadcastd, "pbroadcastd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x58), INS_FLAGS_None) // Broadcast int32 value from reg/memory to entire ymm register INST3(vpbroadcastq, "pbroadcastq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x59), INS_FLAGS_None) // Broadcast int64 value from reg/memory to entire ymm register INST3(vextractf128, "extractf128", IUM_WR, SSE3A(0x19), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract 128-bit packed floating point values INST3(vextracti128, "extracti128", IUM_WR, SSE3A(0x39), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract 128-bit packed integer values INST3(vinsertf128, "insertf128", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x18), INS_Flags_IsDstDstSrcAVXInstruction) // Insert 128-bit packed floating point values INST3(vinserti128, "inserti128", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x38), INS_Flags_IsDstDstSrcAVXInstruction) // Insert 128-bit packed integer 
values INST3(vzeroupper, "zeroupper", IUM_WR, 0xC577F8, BAD_CODE, BAD_CODE, INS_FLAGS_None) // Zero upper 128-bits of all YMM regs (includes 2-byte fixed VEX prefix) INST3(vperm2i128, "perm2i128", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x46), INS_Flags_IsDstDstSrcAVXInstruction) // Permute 128-bit halves of input register INST3(vpermq, "permq", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x00), INS_FLAGS_None) // Permute 64-bit of input register INST3(vpblendd, "pblendd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x02), INS_Flags_IsDstDstSrcAVXInstruction) // Blend Packed DWORDs INST3(vblendvps, "blendvps", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x4A), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Blend Packed Singles INST3(vblendvpd, "blendvpd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x4B), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Blend Packed Doubles INST3(vpblendvb, "pblendvb", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x4C), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Blend Packed Bytes INST3(vtestps, "testps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x0E), INS_FLAGS_None) // Packed Bit Test INST3(vtestpd, "testpd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x0F), INS_FLAGS_None) // Packed Bit Test INST3(vpsrlvd, "psrlvd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x45), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Bit Shift Right Logical INST3(vpsrlvq, "psrlvq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x45), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Bit Shift Right Logical INST3(vpsravd, "psravd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x46), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Bit Shift Right Arithmetic INST3(vpsllvd, "psllvd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x47), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Bit Shift Left Logical INST3(vpsllvq, "psllvq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x47), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Bit Shift Left Logical INST3(vpermilps, "permilps", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x04), INS_FLAGS_None) // Permute In-Lane of Quadruples of Single-Precision Floating-Point Values INST3(vpermilpd, "permilpd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x05), INS_FLAGS_None) // Permute In-Lane of Quadruples of Double-Precision Floating-Point Values INST3(vpermilpsvar, "permilpsvar", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x0C), INS_Flags_IsDstDstSrcAVXInstruction) // Permute In-Lane of Quadruples of Single-Precision Floating-Point Values INST3(vpermilpdvar, "permilpdvar", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x0D), INS_Flags_IsDstDstSrcAVXInstruction) // Permute In-Lane of Quadruples of Double-Precision Floating-Point Values INST3(vperm2f128, "perm2f128", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x06), INS_Flags_IsDstDstSrcAVXInstruction) // Permute Floating-Point Values INST3(vpermpd, "permpd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x01), INS_FLAGS_None) // Permute Double-Precision Floating-Point Values INST3(vpermd, "permd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x36), INS_Flags_IsDstDstSrcAVXInstruction) // Permute Packed Doublewords Elements INST3(vpermps, "permps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x16), INS_Flags_IsDstDstSrcAVXInstruction) // Permute Single-Precision Floating-Point Elements INST3(vbroadcastf128, "broadcastf128", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x1A), INS_FLAGS_None) // Broadcast packed float values read from memory to entire ymm register INST3(vbroadcasti128, "broadcasti128", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x5A), INS_FLAGS_None) // Broadcast packed integer values read from memory to entire ymm register INST3(vmaskmovps, "maskmovps", IUM_WR, SSE38(0x2E), BAD_CODE, 
SSE38(0x2C), INS_Flags_IsDstDstSrcAVXInstruction) // Conditional SIMD Packed Single-Precision Floating-Point Loads and Stores INST3(vmaskmovpd, "maskmovpd", IUM_WR, SSE38(0x2F), BAD_CODE, SSE38(0x2D), INS_Flags_IsDstDstSrcAVXInstruction) // Conditional SIMD Packed Double-Precision Floating-Point Loads and Stores INST3(vpmaskmovd, "pmaskmovd", IUM_WR, SSE38(0x8E), BAD_CODE, SSE38(0x8C), INS_Flags_IsDstDstSrcAVXInstruction) // Conditional SIMD Integer Packed Dword Loads and Stores INST3(vpmaskmovq, "pmaskmovq", IUM_WR, SSE38(0x8E), BAD_CODE, SSE38(0x8C), INS_Flags_IsDstDstSrcAVXInstruction) // Conditional SIMD Integer Packed Qword Loads and Stores INST3(vpgatherdd, "pgatherdd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x90), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed Dword Values Using Signed Dword INST3(vpgatherqd, "pgatherqd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x91), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed Dword Values Using Signed Qword INST3(vpgatherdq, "pgatherdq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x90), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed Dword with Signed Dword Indices INST3(vpgatherqq, "pgatherqq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x91), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed Qword with Signed Dword Indices INST3(vgatherdps, "gatherdps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x92), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed SP FP values Using Signed Dword Indices INST3(vgatherqps, "gatherqps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x93), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed SP FP values Using Signed Qword Indices INST3(vgatherdpd, "gatherdpd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x92), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed DP FP Values Using Signed Dword Indices INST3(vgatherqpd, "gatherqpd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x93), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed DP FP Values Using Signed Qword Indices INST3(FIRST_FMA_INSTRUCTION, "FIRST_FMA_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) // id nm um mr mi rm flags INST3(vfmadd132pd, "fmadd132pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x98), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Add of Packed Double-Precision Floating-Point Values INST3(vfmadd213pd, "fmadd213pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA8), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd231pd, "fmadd231pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB8), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd132ps, "fmadd132ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x98), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Add of Packed Single-Precision Floating-Point Values INST3(vfmadd213ps, "fmadd213ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA8), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd231ps, "fmadd231ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB8), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd132sd, "fmadd132sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x99), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Add of Scalar Double-Precision Floating-Point Values INST3(vfmadd213sd, "fmadd213sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA9), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd231sd, "fmadd231sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB9), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd132ss, "fmadd132ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x99), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Add of Scalar Single-Precision Floating-Point Values INST3(vfmadd213ss, "fmadd213ss", IUM_WR, BAD_CODE, 
BAD_CODE, SSE38(0xA9), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd231ss, "fmadd231ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB9), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmaddsub132pd, "fmaddsub132pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x96), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values INST3(vfmaddsub213pd, "fmaddsub213pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA6), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmaddsub231pd, "fmaddsub231pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB6), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmaddsub132ps, "fmaddsub132ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x96), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values INST3(vfmaddsub213ps, "fmaddsub213ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA6), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmaddsub231ps, "fmaddsub231ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB6), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsubadd132pd, "fmsubadd132pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x97), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values INST3(vfmsubadd213pd, "fmsubadd213pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA7), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsubadd231pd, "fmsubadd231pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB7), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsubadd132ps, "fmsubadd132ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x97), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values INST3(vfmsubadd213ps, "fmsubadd213ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA7), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsubadd231ps, "fmsubadd231ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB7), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub132pd, "fmsub132pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9A), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values INST3(vfmsub213pd, "fmsub213pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAA), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub231pd, "fmsub231pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBA), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub132ps, "fmsub132ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9A), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values INST3(vfmsub213ps, "fmsub213ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAA), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub231ps, "fmsub231ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBA), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub132sd, "fmsub132sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9B), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values INST3(vfmsub213sd, "fmsub213sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAB), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub231sd, "fmsub231sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBB), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub132ss, "fmsub132ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9B), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values INST3(vfmsub213ss, "fmsub213ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAB), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub231ss, "fmsub231ss", 
IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBB), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd132pd, "fnmadd132pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9C), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values INST3(vfnmadd213pd, "fnmadd213pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAC), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd231pd, "fnmadd231pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBC), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd132ps, "fnmadd132ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9C), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values INST3(vfnmadd213ps, "fnmadd213ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAC), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd231ps, "fnmadd231ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBC), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd132sd, "fnmadd132sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9D), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values INST3(vfnmadd213sd, "fnmadd213sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAD), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd231sd, "fnmadd231sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBD), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd132ss, "fnmadd132ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9D), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values INST3(vfnmadd213ss, "fnmadd213ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAD), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd231ss, "fnmadd231ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBD), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub132pd, "fnmsub132pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9E), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values INST3(vfnmsub213pd, "fnmsub213pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAE), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub231pd, "fnmsub231pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBE), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub132ps, "fnmsub132ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9E), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values INST3(vfnmsub213ps, "fnmsub213ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAE), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub231ps, "fnmsub231ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBE), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub132sd, "fnmsub132sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9F), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values INST3(vfnmsub213sd, "fnmsub213sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAF), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub231sd, "fnmsub231sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBF), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub132ss, "fnmsub132ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9F), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values INST3(vfnmsub213ss, "fnmsub213ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAF), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub231ss, "fnmsub231ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBF), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(LAST_FMA_INSTRUCTION, "LAST_FMA_INSTRUCTION", 
IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(FIRST_AVXVNNI_INSTRUCTION, "FIRST_AVXVNNI_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(vpdpbusd, "pdpbusd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x50), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply and Add Unsigned and Signed Bytes INST3(vpdpwssd, "pdpwssd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x52), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply and Add Signed Word Integers INST3(vpdpbusds, "pdpbusds", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x51), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply and Add Unsigned and Signed Bytes with Saturation INST3(vpdpwssds, "pdpwssds", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x53), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply and Add Signed Word Integers with Saturation INST3(LAST_AVXVNNI_INSTRUCTION, "LAST_AVXVNNI_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) // BMI1 INST3(FIRST_BMI_INSTRUCTION, "FIRST_BMI_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(andn, "andn", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF2), Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Undefined_PF | Resets_CF | INS_Flags_IsDstDstSrcAVXInstruction) // Logical AND NOT INST3(blsi, "blsi", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF3), INS_Flags_IsDstDstSrcAVXInstruction) // Extract Lowest Set Isolated Bit INST3(blsmsk, "blsmsk", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF3), INS_Flags_IsDstDstSrcAVXInstruction) // Get Mask Up to Lowest Set Bit INST3(blsr, "blsr", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF3), Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_Flags_IsDstDstSrcAVXInstruction) // Reset Lowest Set Bit INST3(bextr, "bextr", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF7), INS_Flags_IsDstDstSrcAVXInstruction) // Bit Field Extract // BMI2 INST3(rorx, "rorx", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0xF0), INS_FLAGS_None) INST3(pdep, "pdep", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF5), INS_Flags_IsDstDstSrcAVXInstruction) // Parallel Bits Deposit INST3(pext, "pext", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF5), INS_Flags_IsDstDstSrcAVXInstruction) // Parallel Bits Extract INST3(bzhi, "bzhi", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF5), Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_Flags_IsDstDstSrcAVXInstruction) // Zero High Bits Starting with Specified Bit Position INST3(mulx, "mulx", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF6), INS_Flags_IsDstDstSrcAVXInstruction) // Unsigned Multiply Without Affecting Flags INST3(LAST_BMI_INSTRUCTION, "LAST_BMI_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(LAST_AVX_INSTRUCTION, "LAST_AVX_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) // Scalar instructions in SSE4.2 INST3(crc32, "crc32", IUM_WR, BAD_CODE, BAD_CODE, PACK4(0xF2, 0x0F, 0x38, 0xF0), INS_FLAGS_None) // BMI1 INST3(tzcnt, "tzcnt", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xBC), Undefined_OF | Undefined_SF | Writes_ZF | Undefined_AF | Undefined_PF | Writes_CF ) // Count the Number of Trailing Zero Bits // LZCNT INST3(lzcnt, "lzcnt", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xBD), Undefined_OF | Undefined_SF | Writes_ZF | Undefined_AF | Undefined_PF | Writes_CF ) // POPCNT INST3(popcnt, "popcnt", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xB8), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Resets_PF | Resets_CF ) // id nm um mr mi flags INST2(ret, "ret", IUM_RD, 0x0000C3, 0x0000C2, INS_FLAGS_None ) INST2(loop, "loop", IUM_RD, BAD_CODE, 0x0000E2, INS_FLAGS_None ) INST2(call, "call", IUM_RD, 0x0010FF, 
0x0000E8, INS_FLAGS_None ) INST2(rol, "rol", IUM_RW, 0x0000D2, BAD_CODE, Undefined_OF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(rol_1, "rol", IUM_RW, 0x0000D0, 0x0000D0, Writes_OF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(rol_N, "rol", IUM_RW, 0x0000C0, 0x0000C0, Undefined_OF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(ror, "ror", IUM_RW, 0x0008D2, BAD_CODE, Undefined_OF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(ror_1, "ror", IUM_RW, 0x0008D0, 0x0008D0, Writes_OF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(ror_N, "ror", IUM_RW, 0x0008C0, 0x0008C0, Undefined_OF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(rcl, "rcl", IUM_RW, 0x0010D2, BAD_CODE, Undefined_OF | Writes_CF | Reads_CF | INS_FLAGS_Has_Wbit ) INST2(rcl_1, "rcl", IUM_RW, 0x0010D0, 0x0010D0, Writes_OF | Writes_CF | Reads_CF | INS_FLAGS_Has_Wbit ) INST2(rcl_N, "rcl", IUM_RW, 0x0010C0, 0x0010C0, Undefined_OF | Writes_CF | Reads_CF | INS_FLAGS_Has_Wbit ) INST2(rcr, "rcr", IUM_RW, 0x0018D2, BAD_CODE, Undefined_OF | Writes_CF | Reads_CF | INS_FLAGS_Has_Wbit ) INST2(rcr_1, "rcr", IUM_RW, 0x0018D0, 0x0018D0, Writes_OF | Writes_CF | Reads_CF | INS_FLAGS_Has_Wbit ) INST2(rcr_N, "rcr", IUM_RW, 0x0018C0, 0x0018C0, Undefined_OF | Writes_CF | Reads_CF | INS_FLAGS_Has_Wbit ) INST2(shl, "shl", IUM_RW, 0x0020D2, BAD_CODE, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(shl_1, "shl", IUM_RW, 0x0020D0, 0x0020D0, Writes_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(shl_N, "shl", IUM_RW, 0x0020C0, 0x0020C0, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(shr, "shr", IUM_RW, 0x0028D2, BAD_CODE, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(shr_1, "shr", IUM_RW, 0x0028D0, 0x0028D0, Writes_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(shr_N, "shr", IUM_RW, 0x0028C0, 0x0028C0, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(sar, "sar", IUM_RW, 0x0038D2, BAD_CODE, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(sar_1, "sar", IUM_RW, 0x0038D0, 0x0038D0, Writes_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(sar_N, "sar", IUM_RW, 0x0038C0, 0x0038C0, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) // id nm um mr flags INST1(r_movsb, "rep movsb", IUM_RD, 0x00A4F3, Reads_DF | INS_FLAGS_Has_Wbit ) INST1(r_movsd, "rep movsd", IUM_RD, 0x00A5F3, Reads_DF | INS_FLAGS_Has_Wbit ) #if defined(TARGET_AMD64) INST1(r_movsq, "rep movsq", IUM_RD, 0xF3A548, Reads_DF ) #endif // defined(TARGET_AMD64) INST1(movsb, "movsb", IUM_RD, 0x0000A4, Reads_DF | INS_FLAGS_Has_Wbit ) INST1(movsd, "movsd", IUM_RD, 0x0000A5, Reads_DF | INS_FLAGS_Has_Wbit ) #if defined(TARGET_AMD64) INST1(movsq, "movsq", IUM_RD, 0x00A548, Reads_DF ) #endif // defined(TARGET_AMD64) INST1(r_stosb, "rep stosb", IUM_RD, 0x00AAF3, Reads_DF | INS_FLAGS_Has_Wbit ) INST1(r_stosd, "rep stosd", IUM_RD, 0x00ABF3, Reads_DF | INS_FLAGS_Has_Wbit ) #if defined(TARGET_AMD64) INST1(r_stosq, "rep stosq", IUM_RD, 0xF3AB48, Reads_DF ) #endif // defined(TARGET_AMD64) INST1(stosb, "stosb", IUM_RD, 0x0000AA, Reads_DF | INS_FLAGS_Has_Wbit ) INST1(stosd, "stosd", IUM_RD, 0x0000AB, Reads_DF | INS_FLAGS_Has_Wbit ) #if defined(TARGET_AMD64) INST1(stosq, "stosq", IUM_RD, 
0x00AB48, Reads_DF ) #endif // defined(TARGET_AMD64) INST1(int3, "int3", IUM_RD, 0x0000CC, INS_FLAGS_None ) INST1(nop, "nop", IUM_RD, 0x000090, INS_FLAGS_None ) INST1(pause, "pause", IUM_RD, 0x0090F3, INS_FLAGS_None ) INST1(lock, "lock", IUM_RD, 0x0000F0, INS_FLAGS_None ) INST1(leave, "leave", IUM_RD, 0x0000C9, INS_FLAGS_None ) INST1(neg, "neg", IUM_RW, 0x0018F6, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST1(not, "not", IUM_RW, 0x0010F6, INS_FLAGS_None | INS_FLAGS_Has_Wbit ) INST1(cwde, "cwde", IUM_RD, 0x000098, INS_FLAGS_None ) INST1(cdq, "cdq", IUM_RD, 0x000099, INS_FLAGS_None ) INST1(idiv, "idiv", IUM_RD, 0x0038F6, Undefined_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Undefined_CF | INS_FLAGS_Has_Wbit ) INST1(imulEAX, "imul", IUM_RD, 0x0028F6, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST1(div, "div", IUM_RD, 0x0030F6, Undefined_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Undefined_CF | INS_FLAGS_Has_Wbit ) INST1(mulEAX, "mul", IUM_RD, 0x0020F6, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST1(sahf, "sahf", IUM_RD, 0x00009E, Restore_SF_ZF_AF_PF_CF ) INST1(xadd, "xadd", IUM_RW, 0x0F00C0, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST1(cmpxchg, "cmpxchg", IUM_RW, 0x0F00B0, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST1(shld, "shld", IUM_RW, 0x0F00A4, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF ) INST1(shrd, "shrd", IUM_RW, 0x0F00AC, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF ) // For RyuJIT/x86, we follow the x86 calling convention that requires // us to return floating point value on the x87 FP stack, so we need // these instructions regardless of whether we're using full stack fp. #ifdef TARGET_X86 INST1(fld, "fld", IUM_WR, 0x0000D9, INS_FLAGS_x87Instr) INST1(fstp, "fstp", IUM_WR, 0x0018D9, INS_FLAGS_x87Instr) #endif // TARGET_X86 INST1(seto, "seto", IUM_WR, 0x0F0090, Reads_OF ) INST1(setno, "setno", IUM_WR, 0x0F0091, Reads_OF ) INST1(setb, "setb", IUM_WR, 0x0F0092, Reads_CF ) INST1(setae, "setae", IUM_WR, 0x0F0093, Reads_CF ) INST1(sete, "sete", IUM_WR, 0x0F0094, Reads_ZF ) INST1(setne, "setne", IUM_WR, 0x0F0095, Reads_ZF ) INST1(setbe, "setbe", IUM_WR, 0x0F0096, Reads_ZF | Reads_CF ) INST1(seta, "seta", IUM_WR, 0x0F0097, Reads_ZF | Reads_CF ) INST1(sets, "sets", IUM_WR, 0x0F0098, Reads_SF ) INST1(setns, "setns", IUM_WR, 0x0F0099, Reads_SF ) INST1(setp, "setp", IUM_WR, 0x0F009A, Reads_PF ) INST1(setnp, "setnp", IUM_WR, 0x0F009B, Reads_PF ) INST1(setl, "setl", IUM_WR, 0x0F009C, Reads_OF | Reads_SF ) INST1(setge, "setge", IUM_WR, 0x0F009D, Reads_OF | Reads_SF ) INST1(setle, "setle", IUM_WR, 0x0F009E, Reads_OF | Reads_SF | Reads_ZF ) INST1(setg, "setg", IUM_WR, 0x0F009F, Reads_OF | Reads_SF | Reads_ZF ) // Indirect jump used for tailcalls. We differentiate between func-internal // indirect jump (e.g. used for switch) and tailcall indirect jumps because the // x64 unwinder might require the latter to be rex.w prefixed. 
INST1(tail_i_jmp, "tail.jmp", IUM_RD, 0x0020FF, INS_FLAGS_None ) INST1(i_jmp, "jmp", IUM_RD, 0x0020FF, INS_FLAGS_None ) INST0(jmp, "jmp", IUM_RD, 0x0000EB, INS_FLAGS_None ) INST0(jo, "jo", IUM_RD, 0x000070, Reads_OF ) INST0(jno, "jno", IUM_RD, 0x000071, Reads_OF ) INST0(jb, "jb", IUM_RD, 0x000072, Reads_CF ) INST0(jae, "jae", IUM_RD, 0x000073, Reads_CF ) INST0(je, "je", IUM_RD, 0x000074, Reads_ZF ) INST0(jne, "jne", IUM_RD, 0x000075, Reads_ZF ) INST0(jbe, "jbe", IUM_RD, 0x000076, Reads_ZF | Reads_CF ) INST0(ja, "ja", IUM_RD, 0x000077, Reads_ZF | Reads_CF ) INST0(js, "js", IUM_RD, 0x000078, Reads_SF ) INST0(jns, "jns", IUM_RD, 0x000079, Reads_SF ) INST0(jp, "jp", IUM_RD, 0x00007A, Reads_PF ) INST0(jnp, "jnp", IUM_RD, 0x00007B, Reads_PF ) INST0(jl, "jl", IUM_RD, 0x00007C, Reads_OF | Reads_SF ) INST0(jge, "jge", IUM_RD, 0x00007D, Reads_OF | Reads_SF ) INST0(jle, "jle", IUM_RD, 0x00007E, Reads_OF | Reads_SF | Reads_ZF ) INST0(jg, "jg", IUM_RD, 0x00007F, Reads_OF | Reads_SF | Reads_ZF ) INST0(l_jmp, "jmp", IUM_RD, 0x0000E9, INS_FLAGS_None ) INST0(l_jo, "jo", IUM_RD, 0x00800F, Reads_OF ) INST0(l_jno, "jno", IUM_RD, 0x00810F, Reads_OF ) INST0(l_jb, "jb", IUM_RD, 0x00820F, Reads_CF ) INST0(l_jae, "jae", IUM_RD, 0x00830F, Reads_CF ) INST0(l_je, "je", IUM_RD, 0x00840F, Reads_ZF ) INST0(l_jne, "jne", IUM_RD, 0x00850F, Reads_ZF ) INST0(l_jbe, "jbe", IUM_RD, 0x00860F, Reads_ZF | Reads_CF ) INST0(l_ja, "ja", IUM_RD, 0x00870F, Reads_ZF | Reads_CF ) INST0(l_js, "js", IUM_RD, 0x00880F, Reads_SF ) INST0(l_jns, "jns", IUM_RD, 0x00890F, Reads_SF ) INST0(l_jp, "jp", IUM_RD, 0x008A0F, Reads_PF ) INST0(l_jnp, "jnp", IUM_RD, 0x008B0F, Reads_PF ) INST0(l_jl, "jl", IUM_RD, 0x008C0F, Reads_OF | Reads_SF ) INST0(l_jge, "jge", IUM_RD, 0x008D0F, Reads_OF | Reads_SF ) INST0(l_jle, "jle", IUM_RD, 0x008E0F, Reads_OF | Reads_SF | Reads_ZF ) INST0(l_jg, "jg", IUM_RD, 0x008F0F, Reads_OF | Reads_SF | Reads_ZF ) INST0(align, "align", IUM_RD, BAD_CODE, INS_FLAGS_None) /*****************************************************************************/ #undef INST0 #undef INST1 #undef INST2 #undef INST3 #undef INST4 #undef INST5 /*****************************************************************************/ // clang-format on
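// A minimal sketch of how a consumer can expand the table above, here building
// nothing more than an array of the textual instruction names. The include name
// "instrsxarch.h" and the consumer shape are illustrative assumptions; the JIT's
// real consumers define richer INST0..INST5 expansions before including the file.
//
//   #define INST0(id, nm, um, mr,                 flags) nm,
//   #define INST1(id, nm, um, mr,                 flags) nm,
//   #define INST2(id, nm, um, mr, mi,             flags) nm,
//   #define INST3(id, nm, um, mr, mi, rm,         flags) nm,
//   #define INST4(id, nm, um, mr, mi, rm, a4,     flags) nm,
//   #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) nm,
//
//   const char* const insName[] = {
//       #include "instrsxarch.h"   // assumed file name for this table
//   };
//
// The table #undefs INST0..INST5 itself (see above), so a consumer only needs to
// define them before the include.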
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

//
// This file was previously known as instrs.h
//
/*****************************************************************************
 *  x86 instructions for the JIT compiler
 *
 *          id      -- the enum name for the instruction
 *          nm      -- textual name (for assembly display)
 *          um      -- update mode, see IUM_xx enum (rd, wr, or rw)
 *          mr      -- base encoding for R/M[reg] addressing mode
 *          mi      -- base encoding for R/M,icon addressing mode
 *          rm      -- base encoding for reg,R/M addressing mode
 *          a4      -- base encoding for eax,i32 addressing mode
 *          rr      -- base encoding for register addressing mode
 *          flags   -- flags, see INS_FLAGS_* enum
 *
 ******************************************************************************/

// clang-format off
#if !defined(TARGET_XARCH)
  #error Unexpected target type
#endif

#ifndef INST1
#error At least INST1 must be defined before including this file.
#endif
/*****************************************************************************/
#ifndef INST0
#define INST0(id, nm, um, mr, flags)
#endif
#ifndef INST2
#define INST2(id, nm, um, mr, mi, flags)
#endif
#ifndef INST3
#define INST3(id, nm, um, mr, mi, rm, flags)
#endif
#ifndef INST4
#define INST4(id, nm, um, mr, mi, rm, a4, flags)
#endif
#ifndef INST5
#define INST5(id, nm, um, mr, mi, rm, a4, rr, flags)
#endif
/*****************************************************************************/
/*               The following is x86-specific                               */
/*****************************************************************************/

//    id          nm         um      mr        mi        rm        a4        rr        flags
INST5(invalid,   "INVALID",  IUM_RD, BAD_CODE, BAD_CODE, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None)

INST5(push,      "push",     IUM_RD, 0x0030FE, 0x000068, BAD_CODE, BAD_CODE, 0x000050, INS_FLAGS_None )
INST5(pop,       "pop",      IUM_WR, 0x00008F, BAD_CODE, BAD_CODE, BAD_CODE, 0x000058, INS_FLAGS_None )
// Does not affect the stack tracking in the emitter
INST5(push_hide, "push",     IUM_RD, 0x0030FE, 0x000068, BAD_CODE, BAD_CODE, 0x000050, INS_FLAGS_None )
INST5(pop_hide,  "pop",      IUM_WR, 0x00008F, BAD_CODE, BAD_CODE, BAD_CODE, 0x000058, INS_FLAGS_None )

INST5(inc,       "inc",      IUM_RW, 0x0000FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x000040, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | INS_FLAGS_Has_Wbit )
INST5(inc_l,     "inc",      IUM_RW, 0x0000FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x00C0FE, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF )
INST5(dec,       "dec",      IUM_RW, 0x0008FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x000048, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | INS_FLAGS_Has_Wbit )
INST5(dec_l,     "dec",      IUM_RW, 0x0008FE, BAD_CODE, BAD_CODE, BAD_CODE, 0x00C8FE, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF )

// Multi-byte opcodes without modrm are represented in mixed endian fashion.
// See comment around quarter way through this file for more information.
INST5(bswap, "bswap", IUM_RW, 0x0F00C8, BAD_CODE, BAD_CODE, BAD_CODE, 0x00C80F, INS_FLAGS_None ) // id nm um mr mi rm a4 flags INST4(add, "add", IUM_RW, 0x000000, 0x000080, 0x000002, 0x000004, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(or, "or", IUM_RW, 0x000008, 0x000880, 0x00000A, 0x00000C, Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Resets_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(adc, "adc", IUM_RW, 0x000010, 0x001080, 0x000012, 0x000014, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | Reads_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(sbb, "sbb", IUM_RW, 0x000018, 0x001880, 0x00001A, 0x00001C, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | Reads_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(and, "and", IUM_RW, 0x000020, 0x002080, 0x000022, 0x000024, Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Resets_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(sub, "sub", IUM_RW, 0x000028, 0x002880, 0x00002A, 0x00002C, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(xor, "xor", IUM_RW, 0x000030, 0x003080, 0x000032, 0x000034, Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Resets_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(cmp, "cmp", IUM_RD, 0x000038, 0x003880, 0x00003A, 0x00003C, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Sbit | INS_FLAGS_Has_Wbit ) INST4(test, "test", IUM_RD, 0x000084, 0x0000F6, 0x000084, 0x0000A8, Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Resets_CF | INS_FLAGS_Has_Wbit ) INST4(mov, "mov", IUM_WR, 0x000088, 0x0000C6, 0x00008A, 0x0000B0, INS_FLAGS_Has_Wbit ) INST4(lea, "lea", IUM_WR, BAD_CODE, BAD_CODE, 0x00008D, BAD_CODE, INS_FLAGS_None ) // id nm um mr mi rm flags // Note that emitter has only partial support for BT. It can only emit the reg,reg form // and the registers need to be reversed to get the correct encoding. 
INST3(bt, "bt", IUM_RD, 0x0F00A3, BAD_CODE, 0x0F00A3, Undefined_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF ) INST3(bsf, "bsf", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00BC, Undefined_OF | Undefined_SF | Writes_ZF | Undefined_AF | Undefined_PF | Undefined_CF ) INST3(bsr, "bsr", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00BD, Undefined_OF | Undefined_SF | Writes_ZF | Undefined_AF | Undefined_PF | Undefined_CF ) INST3(movsx, "movsx", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00BE, INS_FLAGS_Has_Wbit ) #ifdef TARGET_AMD64 INST3(movsxd, "movsxd", IUM_WR, BAD_CODE, BAD_CODE, 0x4800000063, INS_FLAGS_Has_Wbit ) #endif INST3(movzx, "movzx", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00B6, INS_FLAGS_Has_Wbit ) INST3(cmovo, "cmovo", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0040, Reads_OF ) INST3(cmovno, "cmovno", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0041, Reads_OF ) INST3(cmovb, "cmovb", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0042, Reads_CF ) INST3(cmovae, "cmovae", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0043, Reads_CF ) INST3(cmove, "cmove", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0044, Reads_ZF ) INST3(cmovne, "cmovne", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0045, Reads_ZF ) INST3(cmovbe, "cmovbe", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0046, Reads_ZF | Reads_CF ) INST3(cmova, "cmova", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0047, Reads_ZF | Reads_CF ) INST3(cmovs, "cmovs", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0048, Reads_SF ) INST3(cmovns, "cmovns", IUM_WR, BAD_CODE, BAD_CODE, 0x0F0049, Reads_SF ) INST3(cmovp, "cmovp", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004A, Reads_PF ) INST3(cmovnp, "cmovnp", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004B, Reads_PF ) INST3(cmovl, "cmovl", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004C, Reads_OF | Reads_SF ) INST3(cmovge, "cmovge", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004D, Reads_OF | Reads_SF ) INST3(cmovle, "cmovle", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004E, Reads_OF | Reads_SF | Reads_ZF ) INST3(cmovg, "cmovg", IUM_WR, BAD_CODE, BAD_CODE, 0x0F004F, Reads_OF | Reads_SF | Reads_ZF ) INST3(xchg, "xchg", IUM_RW, 0x000086, BAD_CODE, 0x000086, INS_FLAGS_Has_Wbit ) INST3(imul, "imul", IUM_RW, 0x0F00AC, BAD_CODE, 0x0F00AF, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) // id nm um mr mi rm flags // Instead of encoding these as 3-operand instructions, we encode them // as 2-operand instructions with the target register being implicit // implicit_reg = op1*op2_icon #define INSTMUL INST3 INSTMUL(imul_AX, "imul", IUM_RD, BAD_CODE, 0x000068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_CX, "imul", IUM_RD, BAD_CODE, 0x000868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_DX, "imul", IUM_RD, BAD_CODE, 0x001068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_BX, "imul", IUM_RD, BAD_CODE, 0x001868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_SP, "imul", IUM_RD, BAD_CODE, BAD_CODE, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_BP, "imul", IUM_RD, BAD_CODE, 0x002868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_SI, "imul", IUM_RD, BAD_CODE, 0x003068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | 
Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_DI, "imul", IUM_RD, BAD_CODE, 0x003868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) #ifdef TARGET_AMD64 INSTMUL(imul_08, "imul", IUM_RD, BAD_CODE, 0x4400000068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_09, "imul", IUM_RD, BAD_CODE, 0x4400000868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_10, "imul", IUM_RD, BAD_CODE, 0x4400001068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_11, "imul", IUM_RD, BAD_CODE, 0x4400001868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_12, "imul", IUM_RD, BAD_CODE, 0x4400002068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_13, "imul", IUM_RD, BAD_CODE, 0x4400002868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_14, "imul", IUM_RD, BAD_CODE, 0x4400003068, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) INSTMUL(imul_15, "imul", IUM_RD, BAD_CODE, 0x4400003868, BAD_CODE, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Sbit ) #endif // TARGET_AMD64 // the hex codes in this file represent the instruction encoding as follows: // 0x0000ff00 - modrm byte position // 0x000000ff - last byte of opcode (before modrm) // 0x00ff0000 - first byte of opcode // 0xff000000 - middle byte of opcode, if needed (after first, before last) // // So a 1-byte opcode is: and with modrm: // 0x00000011 0x0000RM11 // // So a 2-byte opcode is: and with modrm: // 0x00002211 0x0011RM22 // // So a 3-byte opcode is: and with modrm: // 0x00113322 0x2211RM33 // // So a 4-byte opcode would be something like this: // 0x22114433 #define PACK3(byte1,byte2,byte3) (((byte1) << 16) | ((byte2) << 24) | (byte3)) #define PACK2(byte1,byte2) (((byte1) << 16) | (byte2)) #define SSEFLT(c) PACK3(0xf3, 0x0f, c) #define SSEDBL(c) PACK3(0xf2, 0x0f, c) #define PCKDBL(c) PACK3(0x66, 0x0f, c) #define PCKFLT(c) PACK2(0x0f,c) // These macros encode extra byte that is implicit in the macro. #define PACK4(byte1,byte2,byte3,byte4) (((byte1) << 16) | ((byte2) << 24) | (byte3) | ((byte4) << 8)) #define SSE38(c) PACK4(0x66, 0x0f, 0x38, c) #define SSE3A(c) PACK4(0x66, 0x0f, 0x3A, c) // VEX* encodes the implied leading opcode bytes in c1: // 1: implied 0f, 2: implied 0f 38, 3: implied 0f 3a #define VEX2INT(c1,c2) PACK3(c1, 0xc5, c2) #define VEX3INT(c1,c2) PACK4(c1, 0xc5, 0x02, c2) #define VEX3FLT(c1,c2) PACK4(c1, 0xc5, 0x02, c2) INST3(FIRST_SSE_INSTRUCTION, "FIRST_SSE_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) // These are the SSE instructions used on x86 INST3(pmovmskb, "pmovmskb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xD7), INS_FLAGS_None) // Move the MSB bits of all bytes in a xmm reg to an int reg INST3(movmskpd, "movmskpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x50), INS_FLAGS_None) // Extract 2-bit sign mask from xmm and store in reg. The upper bits of r32 or r64 are filled with zeros. 
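// As a worked example of the packing above: addpd is encoded as 66 0F 58 /r and is
// stored in the table as PCKDBL(0x58), which expands to
//
//   PACK3(0x66, 0x0f, 0x58) == (0x66 << 16) | (0x0f << 24) | 0x58 == 0x0F660058
//
// i.e. the 0x66 prefix sits in the "first byte" position (0x00ff0000), 0x0f in the
// "middle byte" position (0xff000000), 0x58 in the "last byte" position (0x000000ff),
// and bits 8-15 are left clear for the modrm byte that the emitter inserts. A check
// along the lines of static_assert(PCKDBL(0x58) == 0x0F660058, "") would hold.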
INST3(movd, "movd", IUM_WR, PCKDBL(0x7E), BAD_CODE, PCKDBL(0x6E), INS_FLAGS_None) // Move Double/Quadword between mm regs <-> memory/r32/r64 regs, cleanup https://github.com/dotnet/runtime/issues/47943 INST3(movq, "movq", IUM_WR, PCKDBL(0xD6), BAD_CODE, SSEFLT(0x7E), INS_FLAGS_None) // Move Quadword between memory/mm <-> regs, cleanup https://github.com/dotnet/runtime/issues/47943 INST3(movsdsse2, "movsd", IUM_WR, SSEDBL(0x11), BAD_CODE, SSEDBL(0x10), INS_Flags_IsDstSrcSrcAVXInstruction) INST3(punpckldq, "punpckldq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x62), INS_Flags_IsDstDstSrcAVXInstruction) INST3(xorps, "xorps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x57), INS_Flags_IsDstDstSrcAVXInstruction) // XOR packed singles INST3(cvttsd2si, "cvttsd2si", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x2C), INS_FLAGS_None) // cvt with trunc scalar double to signed DWORDs INST3(movntdq, "movntdq", IUM_WR, PCKDBL(0xE7), BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(movnti, "movnti", IUM_WR, PCKFLT(0xC3), BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(movntpd, "movntpd", IUM_WR, PCKDBL(0x2B), BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(movntps, "movntps", IUM_WR, PCKFLT(0x2B), BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(movdqu, "movdqu", IUM_WR, SSEFLT(0x7F), BAD_CODE, SSEFLT(0x6F), INS_FLAGS_None) INST3(movdqa, "movdqa", IUM_WR, PCKDBL(0x7F), BAD_CODE, PCKDBL(0x6F), INS_FLAGS_None) INST3(movlpd, "movlpd", IUM_WR, PCKDBL(0x13), BAD_CODE, PCKDBL(0x12), INS_Flags_IsDstSrcSrcAVXInstruction) INST3(movlps, "movlps", IUM_WR, PCKFLT(0x13), BAD_CODE, PCKFLT(0x12), INS_Flags_IsDstSrcSrcAVXInstruction) INST3(movhpd, "movhpd", IUM_WR, PCKDBL(0x17), BAD_CODE, PCKDBL(0x16), INS_Flags_IsDstSrcSrcAVXInstruction) INST3(movhps, "movhps", IUM_WR, PCKFLT(0x17), BAD_CODE, PCKFLT(0x16), INS_Flags_IsDstSrcSrcAVXInstruction) INST3(movss, "movss", IUM_WR, SSEFLT(0x11), BAD_CODE, SSEFLT(0x10), INS_Flags_IsDstSrcSrcAVXInstruction) INST3(movapd, "movapd", IUM_WR, PCKDBL(0x29), BAD_CODE, PCKDBL(0x28), INS_FLAGS_None) INST3(movaps, "movaps", IUM_WR, PCKFLT(0x29), BAD_CODE, PCKFLT(0x28), INS_FLAGS_None) INST3(movupd, "movupd", IUM_WR, PCKDBL(0x11), BAD_CODE, PCKDBL(0x10), INS_FLAGS_None) INST3(movups, "movups", IUM_WR, PCKFLT(0x11), BAD_CODE, PCKFLT(0x10), INS_FLAGS_None) INST3(movhlps, "movhlps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x12), INS_Flags_IsDstDstSrcAVXInstruction) INST3(movlhps, "movlhps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x16), INS_Flags_IsDstDstSrcAVXInstruction) INST3(movmskps, "movmskps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x50), INS_FLAGS_None) INST3(unpckhps, "unpckhps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x15), INS_Flags_IsDstDstSrcAVXInstruction) INST3(unpcklps, "unpcklps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x14), INS_Flags_IsDstDstSrcAVXInstruction) INST3(maskmovdqu, "maskmovdqu", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xF7), INS_FLAGS_None) INST3(shufps, "shufps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0xC6), INS_Flags_IsDstDstSrcAVXInstruction) INST3(shufpd, "shufpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xC6), INS_Flags_IsDstDstSrcAVXInstruction) INST3(punpckhdq, "punpckhdq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x6A), INS_Flags_IsDstDstSrcAVXInstruction) INST3(lfence, "lfence", IUM_RD, 0x000FE8AE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(mfence, "mfence", IUM_RD, 0x000FF0AE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(prefetchnta, "prefetchnta", IUM_RD, 0x000F0018, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(prefetcht0, "prefetcht0", IUM_RD, 0x000F0818, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(prefetcht1, "prefetcht1", IUM_RD, 0x000F1018, 
BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(prefetcht2, "prefetcht2", IUM_RD, 0x000F1818, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(sfence, "sfence", IUM_RD, 0x000FF8AE, BAD_CODE, BAD_CODE, INS_FLAGS_None) // SSE 2 arith INST3(addps, "addps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x58), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed singles INST3(addss, "addss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x58), INS_Flags_IsDstDstSrcAVXInstruction) // Add scalar singles INST3(addpd, "addpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x58), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed doubles INST3(addsd, "addsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x58), INS_Flags_IsDstDstSrcAVXInstruction) // Add scalar doubles INST3(mulps, "mulps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x59), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply packed singles INST3(mulss, "mulss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x59), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply scalar single INST3(mulpd, "mulpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x59), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply packed doubles INST3(mulsd, "mulsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x59), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply scalar doubles INST3(subps, "subps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x5C), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed singles INST3(subss, "subss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x5C), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract scalar singles INST3(subpd, "subpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x5C), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed doubles INST3(subsd, "subsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x5C), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract scalar doubles INST3(minps, "minps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x5D), INS_Flags_IsDstDstSrcAVXInstruction) // Return Minimum packed singles INST3(minss, "minss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x5D), INS_Flags_IsDstDstSrcAVXInstruction) // Return Minimum scalar single INST3(minpd, "minpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x5D), INS_Flags_IsDstDstSrcAVXInstruction) // Return Minimum packed doubles INST3(minsd, "minsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x5D), INS_Flags_IsDstDstSrcAVXInstruction) // Return Minimum scalar double INST3(divps, "divps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x5E), INS_Flags_IsDstDstSrcAVXInstruction) // Divide packed singles INST3(divss, "divss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x5E), INS_Flags_IsDstDstSrcAVXInstruction) // Divide scalar singles INST3(divpd, "divpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x5E), INS_Flags_IsDstDstSrcAVXInstruction) // Divide packed doubles INST3(divsd, "divsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x5E), INS_Flags_IsDstDstSrcAVXInstruction) // Divide scalar doubles INST3(maxps, "maxps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x5F), INS_Flags_IsDstDstSrcAVXInstruction) // Return Maximum packed singles INST3(maxss, "maxss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x5F), INS_Flags_IsDstDstSrcAVXInstruction) // Return Maximum scalar single INST3(maxpd, "maxpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x5F), INS_Flags_IsDstDstSrcAVXInstruction) // Return Maximum packed doubles INST3(maxsd, "maxsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x5F), INS_Flags_IsDstDstSrcAVXInstruction) // Return Maximum scalar double INST3(xorpd, "xorpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x57), INS_Flags_IsDstDstSrcAVXInstruction) // XOR packed doubles INST3(andps, "andps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x54), INS_Flags_IsDstDstSrcAVXInstruction) // AND packed singles 
INST3(andpd, "andpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x54), INS_Flags_IsDstDstSrcAVXInstruction) // AND packed doubles INST3(sqrtps, "sqrtps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x51), INS_FLAGS_None) // Sqrt of packed singles INST3(sqrtss, "sqrtss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x51), INS_Flags_IsDstSrcSrcAVXInstruction) // Sqrt of scalar single INST3(sqrtpd, "sqrtpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x51), INS_FLAGS_None) // Sqrt of packed doubles INST3(sqrtsd, "sqrtsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x51), INS_Flags_IsDstSrcSrcAVXInstruction) // Sqrt of scalar double INST3(andnps, "andnps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x55), INS_Flags_IsDstDstSrcAVXInstruction) // And-Not packed singles INST3(andnpd, "andnpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x55), INS_Flags_IsDstDstSrcAVXInstruction) // And-Not packed doubles INST3(orps, "orps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x56), INS_Flags_IsDstDstSrcAVXInstruction) // Or packed singles INST3(orpd, "orpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x56), INS_Flags_IsDstDstSrcAVXInstruction) // Or packed doubles INST3(haddpd, "haddpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x7C), INS_Flags_IsDstDstSrcAVXInstruction) // Horizontal add packed doubles INST3(haddps, "haddps", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x7C), INS_Flags_IsDstDstSrcAVXInstruction) // Horizontal add packed floats INST3(hsubpd, "hsubpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x7D), INS_Flags_IsDstDstSrcAVXInstruction) // Horizontal subtract packed doubles INST3(hsubps, "hsubps", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x7D), INS_Flags_IsDstDstSrcAVXInstruction) // Horizontal subtract packed floats INST3(addsubps, "addsubps", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0xD0), INS_Flags_IsDstDstSrcAVXInstruction) // Add/Subtract packed singles INST3(addsubpd, "addsubpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xD0), INS_Flags_IsDstDstSrcAVXInstruction) // Add/Subtract packed doubles // SSE 2 approx arith INST3(rcpps, "rcpps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x53), INS_FLAGS_None) // Reciprocal of packed singles INST3(rcpss, "rcpss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x53), INS_Flags_IsDstSrcSrcAVXInstruction) // Reciprocal of scalar single INST3(rsqrtps, "rsqrtps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x52), INS_FLAGS_None) // Reciprocal Sqrt of packed singles INST3(rsqrtss, "rsqrtss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x52), INS_Flags_IsDstSrcSrcAVXInstruction) // Reciprocal Sqrt of scalar single // SSE2 conversions INST3(cvtpi2ps, "cvtpi2ps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x2A), INS_FLAGS_None) // cvt packed DWORDs to singles INST3(cvtsi2ss, "cvtsi2ss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x2A), INS_Flags_IsDstDstSrcAVXInstruction) // cvt DWORD to scalar single INST3(cvtpi2pd, "cvtpi2pd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x2A), INS_FLAGS_None) // cvt packed DWORDs to doubles INST3(cvtsi2sd, "cvtsi2sd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x2A), INS_Flags_IsDstDstSrcAVXInstruction) // cvt DWORD to scalar double INST3(cvttps2pi, "cvttps2pi", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x2C), INS_FLAGS_None) // cvt with trunc packed singles to DWORDs INST3(cvttss2si, "cvttss2si", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x2C), INS_FLAGS_None) // cvt with trunc scalar single to DWORD INST3(cvttpd2pi, "cvttpd2pi", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x2C), INS_FLAGS_None) // cvt with trunc packed doubles to DWORDs INST3(cvtps2pi, "cvtps2pi", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x2D), INS_FLAGS_None) // cvt packed singles to DWORDs INST3(cvtss2si, "cvtss2si", IUM_WR, BAD_CODE, BAD_CODE, 
SSEFLT(0x2D), INS_FLAGS_None) // cvt scalar single to DWORD INST3(cvtpd2pi, "cvtpd2pi", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x2D), INS_FLAGS_None) // cvt packed doubles to DWORDs INST3(cvtsd2si, "cvtsd2si", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x2D), INS_FLAGS_None) // cvt scalar double to DWORD INST3(cvtps2pd, "cvtps2pd", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x5A), INS_FLAGS_None) // cvt packed singles to doubles INST3(cvtpd2ps, "cvtpd2ps", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x5A), INS_FLAGS_None) // cvt packed doubles to singles INST3(cvtss2sd, "cvtss2sd", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x5A), INS_Flags_IsDstDstSrcAVXInstruction) // cvt scalar single to scalar doubles INST3(cvtsd2ss, "cvtsd2ss", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x5A), INS_Flags_IsDstDstSrcAVXInstruction) // cvt scalar double to scalar singles INST3(cvtdq2ps, "cvtdq2ps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0x5B), INS_FLAGS_None) // cvt packed DWORDs to singles INST3(cvtps2dq, "cvtps2dq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x5B), INS_FLAGS_None) // cvt packed singles to DWORDs INST3(cvttps2dq, "cvttps2dq", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x5B), INS_FLAGS_None) // cvt with trunc packed singles to DWORDs INST3(cvtpd2dq, "cvtpd2dq", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0xE6), INS_FLAGS_None) // cvt packed doubles to DWORDs INST3(cvttpd2dq, "cvttpd2dq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE6), INS_FLAGS_None) // cvt with trunc packed doubles to DWORDs INST3(cvtdq2pd, "cvtdq2pd", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xE6), INS_FLAGS_None) // cvt packed DWORDs to doubles // SSE2 comparison instructions INST3(comiss, "comiss", IUM_RD, BAD_CODE, BAD_CODE, PCKFLT(0x2F), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Writes_PF | Writes_CF ) // ordered compare singles INST3(comisd, "comisd", IUM_RD, BAD_CODE, BAD_CODE, PCKDBL(0x2F), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Writes_PF | Writes_CF ) // ordered compare doubles INST3(ucomiss, "ucomiss", IUM_RD, BAD_CODE, BAD_CODE, PCKFLT(0x2E), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Writes_PF | Writes_CF ) // unordered compare singles INST3(ucomisd, "ucomisd", IUM_RD, BAD_CODE, BAD_CODE, PCKDBL(0x2E), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Writes_PF | Writes_CF ) // unordered compare doubles // SSE2 packed single/double comparison operations. // Note that these instructions not only compare but also overwrite the first source. 
INST3(cmpps, "cmpps", IUM_WR, BAD_CODE, BAD_CODE, PCKFLT(0xC2), INS_Flags_IsDstDstSrcAVXInstruction) // compare packed singles INST3(cmppd, "cmppd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xC2), INS_Flags_IsDstDstSrcAVXInstruction) // compare packed doubles INST3(cmpss, "cmpss", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xC2), INS_Flags_IsDstDstSrcAVXInstruction) // compare scalar singles INST3(cmpsd, "cmpsd", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0xC2), INS_Flags_IsDstDstSrcAVXInstruction) // compare scalar doubles //SSE2 packed integer operations INST3(paddb, "paddb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xFC), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed byte integers INST3(paddw, "paddw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xFD), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed word (16-bit) integers INST3(paddd, "paddd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xFE), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed double-word (32-bit) integers INST3(paddq, "paddq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xD4), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed quad-word (64-bit) integers INST3(paddsb, "paddsb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xEC), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed signed byte integers and saturate the results INST3(paddsw, "paddsw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xED), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed signed word integers and saturate the results INST3(paddusb, "paddusb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xDC), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed unsigned byte integers and saturate the results INST3(paddusw, "paddusw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xDD), INS_Flags_IsDstDstSrcAVXInstruction) // Add packed unsigned word integers and saturate the results INST3(pavgb, "pavgb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE0), INS_Flags_IsDstDstSrcAVXInstruction) // Average of packed byte integers INST3(pavgw, "pavgw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE3), INS_Flags_IsDstDstSrcAVXInstruction) // Average of packed word integers INST3(psubb, "psubb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xF8), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed word (16-bit) integers INST3(psubw, "psubw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xF9), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed word (16-bit) integers INST3(psubd, "psubd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xFA), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed double-word (32-bit) integers INST3(psubq, "psubq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xFB), INS_Flags_IsDstDstSrcAVXInstruction) // subtract packed quad-word (64-bit) integers INST3(pmaddwd, "pmaddwd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xF5), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. 
Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in dst INST3(pmulhw, "pmulhw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE5), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply high the packed 16-bit signed integers INST3(pmulhuw, "pmulhuw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE4), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply high the packed 16-bit unsigned integers INST3(pmuludq, "pmuludq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xF4), INS_Flags_IsDstDstSrcAVXInstruction) // packed multiply 32-bit unsigned integers and store 64-bit result INST3(pmullw, "pmullw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xD5), INS_Flags_IsDstDstSrcAVXInstruction) // Packed multiply 16 bit unsigned integers and store lower 16 bits of each result INST3(pand, "pand", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xDB), INS_Flags_IsDstDstSrcAVXInstruction) // Packed bit-wise AND of two xmm regs INST3(pandn, "pandn", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xDF), INS_Flags_IsDstDstSrcAVXInstruction) // Packed bit-wise AND NOT of two xmm regs INST3(por, "por", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xEB), INS_Flags_IsDstDstSrcAVXInstruction) // Packed bit-wise OR of two xmm regs INST3(pxor, "pxor", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xEF), INS_Flags_IsDstDstSrcAVXInstruction) // Packed bit-wise XOR of two xmm regs INST3(psadbw, "psadbw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xF6), INS_Flags_IsDstDstSrcAVXInstruction) // Compute the sum of absolute differences of packed unsigned 8-bit integers INST3(psubsb, "psubsb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE8), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed 8-bit integers in b from packed 8-bit integers in a using saturation INST3(psubusb, "psubusb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xD8), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation INST3(psubsw, "psubsw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xE9), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed 16-bit integers in b from packed 16-bit integers in a using saturation INST3(psubusw, "psubusw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xD9), INS_Flags_IsDstDstSrcAVXInstruction) // Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation // Note that the shift immediates share the same encoding between left and right-shift, and are distinguished by the Reg/Opcode, // which is handled in emitxarch.cpp. 
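The note above says the immediate shift forms share one opcode and are distinguished only by the Reg/Opcode (ModRM reg) field, with the actual handling living in emitxarch.cpp. A hedged C# sketch of what that means at the byte level follows — this is not the emitter's code, and the /digit values used (e.g. /2 for psrlq, /3 for psrldq, /6 for psllq, /7 for pslldq under 66 0F 73) are quoted from the Intel SDM rather than taken from this table:

```csharp
// Illustrative only: one opcode byte pattern (66 0F 73), several operations,
// selected purely by the ModRM reg field ("/digit" in the Intel SDM).
static class ShiftImmEncodingSketch
{
    // regDigit: 2 = psrlq, 3 = psrldq, 6 = psllq, 7 = pslldq (SDM reference values).
    // xmmReg:   destination XMM register number 0-7 (no REX/VEX handling in this sketch).
    public static byte[] Encode(byte regDigit, byte xmmReg, byte imm8)
    {
        // mod=11 (register direct), reg selects the operation, rm names the xmm register.
        byte modrm = (byte)(0xC0 | ((regDigit & 0x7) << 3) | (xmmReg & 0x7));
        return new byte[] { 0x66, 0x0F, 0x73, modrm, imm8 };
    }
}

// Encode(2, 1, 4) => 66 0F 73 D1 04  (psrlq xmm1, 4)
// Encode(6, 1, 4) => 66 0F 73 F1 04  (psllq xmm1, 4)
```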
INST3(psrldq, "psrldq", IUM_WR, BAD_CODE, PCKDBL(0x73), BAD_CODE, INS_Flags_IsDstDstSrcAVXInstruction) // Shift right logical of xmm reg by given number of bytes INST3(pslldq, "pslldq", IUM_WR, BAD_CODE, PCKDBL(0x73), BAD_CODE, INS_Flags_IsDstDstSrcAVXInstruction) // Shift left logical of xmm reg by given number of bytes INST3(psllw, "psllw", IUM_WR, BAD_CODE, PCKDBL(0x71), PCKDBL(0xF1), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift left logical of 16-bit integers INST3(pslld, "pslld", IUM_WR, BAD_CODE, PCKDBL(0x72), PCKDBL(0xF2), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift left logical of 32-bit integers INST3(psllq, "psllq", IUM_WR, BAD_CODE, PCKDBL(0x73), PCKDBL(0xF3), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift left logical of 64-bit integers INST3(psrlw, "psrlw", IUM_WR, BAD_CODE, PCKDBL(0x71), PCKDBL(0xD1), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift right logical of 16-bit integers INST3(psrld, "psrld", IUM_WR, BAD_CODE, PCKDBL(0x72), PCKDBL(0xD2), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift right logical of 32-bit integers INST3(psrlq, "psrlq", IUM_WR, BAD_CODE, PCKDBL(0x73), PCKDBL(0xD3), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift right logical of 64-bit integers INST3(psraw, "psraw", IUM_WR, BAD_CODE, PCKDBL(0x71), PCKDBL(0xE1), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift right arithmetic of 16-bit integers INST3(psrad, "psrad", IUM_WR, BAD_CODE, PCKDBL(0x72), PCKDBL(0xE2), INS_Flags_IsDstDstSrcAVXInstruction) // Packed shift right arithmetic of 32-bit integers INST3(pmaxub, "pmaxub", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xDE), INS_Flags_IsDstDstSrcAVXInstruction) // packed maximum unsigned bytes INST3(pminub, "pminub", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xDA), INS_Flags_IsDstDstSrcAVXInstruction) // packed minimum unsigned bytes INST3(pmaxsw, "pmaxsw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xEE), INS_Flags_IsDstDstSrcAVXInstruction) // packed maximum signed words INST3(pminsw, "pminsw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xEA), INS_Flags_IsDstDstSrcAVXInstruction) // packed minimum signed words INST3(pcmpeqd, "pcmpeqd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x76), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 32-bit integers for equality INST3(pcmpgtd, "pcmpgtd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x66), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 32-bit signed integers for greater than INST3(pcmpeqw, "pcmpeqw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x75), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 16-bit integers for equality INST3(pcmpgtw, "pcmpgtw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x65), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 16-bit signed integers for greater than INST3(pcmpeqb, "pcmpeqb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x74), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 8-bit integers for equality INST3(pcmpgtb, "pcmpgtb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x64), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 8-bit signed integers for greater than INST3(pshufd, "pshufd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x70), INS_FLAGS_None) // Packed shuffle of 32-bit integers INST3(pshufhw, "pshufhw", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x70), INS_FLAGS_None) // Shuffle the high words in xmm2/m128 based on the encoding in imm8 and store the result in xmm1. INST3(pshuflw, "pshuflw", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x70), INS_FLAGS_None) // Shuffle the low words in xmm2/m128 based on the encoding in imm8 and store the result in xmm1. 
INST3(pextrw, "pextrw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xC5), INS_FLAGS_None) // Extract 16-bit value into a r32 with zero extended to 32-bits INST3(pinsrw, "pinsrw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0xC4), INS_Flags_IsDstDstSrcAVXInstruction) // Insert word at index INST3(punpckhbw, "punpckhbw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x68), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen ubyte to ushort (hi) INST3(punpcklbw, "punpcklbw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x60), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen ubyte to ushort (lo) INST3(punpckhqdq, "punpckhqdq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x6D), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen uint to ulong (hi) INST3(punpcklqdq, "punpcklqdq", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x6C), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen uint to ulong (lo) INST3(punpckhwd, "punpckhwd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x69), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen ushort to uint (hi) INST3(punpcklwd, "punpcklwd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x61), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen ushort to uint (lo) INST3(unpckhpd, "unpckhpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x15), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen ubyte to ushort (hi) INST3(unpcklpd, "unpcklpd", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x14), INS_Flags_IsDstDstSrcAVXInstruction) // Packed logical (unsigned) widen ubyte to ushort (hi) INST3(packssdw, "packssdw", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x6B), INS_Flags_IsDstDstSrcAVXInstruction) // Pack (narrow) int to short with saturation INST3(packsswb, "packsswb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x63), INS_Flags_IsDstDstSrcAVXInstruction) // Pack (narrow) short to byte with saturation INST3(packuswb, "packuswb", IUM_WR, BAD_CODE, BAD_CODE, PCKDBL(0x67), INS_Flags_IsDstDstSrcAVXInstruction) // Pack (narrow) short to unsigned byte with saturation // id nm um mr mi rm flags INST3(dpps, "dpps", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x40), INS_Flags_IsDstDstSrcAVXInstruction) // Packed dot product of two float vector regs INST3(dppd, "dppd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x41), INS_Flags_IsDstDstSrcAVXInstruction) // Packed dot product of two double vector regs INST3(insertps, "insertps", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x21), INS_Flags_IsDstDstSrcAVXInstruction) // Insert packed single precision float value INST3(pcmpeqq, "pcmpeqq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x29), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 64-bit integers for equality INST3(pcmpgtq, "pcmpgtq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x37), INS_Flags_IsDstDstSrcAVXInstruction) // Packed compare 64-bit integers for equality INST3(pmulld, "pmulld", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x40), INS_Flags_IsDstDstSrcAVXInstruction) // Packed multiply 32 bit unsigned integers and store lower 32 bits of each result INST3(ptest, "ptest", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x17), INS_FLAGS_None) // Packed logical compare INST3(phaddd, "phaddd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x02), INS_Flags_IsDstDstSrcAVXInstruction) // Packed horizontal add INST3(pabsb, "pabsb", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x1C), INS_FLAGS_None) // Packed absolute value of bytes INST3(pabsw, "pabsw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x1D), INS_FLAGS_None) // Packed absolute value of 16-bit integers INST3(pabsd, "pabsd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x1E), 
INS_FLAGS_None) // Packed absolute value of 32-bit integers INST3(palignr, "palignr", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x0F), INS_Flags_IsDstDstSrcAVXInstruction) // Packed Align Right INST3(pmaddubsw, "pmaddubsw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x04), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply and Add Packed Signed and Unsigned Bytes INST3(pmulhrsw, "pmulhrsw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x0B), INS_Flags_IsDstDstSrcAVXInstruction) // Packed Multiply High with Round and Scale INST3(pshufb, "pshufb", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x00), INS_Flags_IsDstDstSrcAVXInstruction) // Packed Shuffle Bytes INST3(psignb, "psignb", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x08), INS_Flags_IsDstDstSrcAVXInstruction) // Packed SIGN INST3(psignw, "psignw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x09), INS_Flags_IsDstDstSrcAVXInstruction) // Packed SIGN INST3(psignd, "psignd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x0A), INS_Flags_IsDstDstSrcAVXInstruction) // Packed SIGN INST3(pminsb, "pminsb", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x38), INS_Flags_IsDstDstSrcAVXInstruction) // packed minimum signed bytes INST3(pminsd, "pminsd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x39), INS_Flags_IsDstDstSrcAVXInstruction) // packed minimum 32-bit signed integers INST3(pminuw, "pminuw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x3A), INS_Flags_IsDstDstSrcAVXInstruction) // packed minimum 16-bit unsigned integers INST3(pminud, "pminud", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x3B), INS_Flags_IsDstDstSrcAVXInstruction) // packed minimum 32-bit unsigned integers INST3(pmaxsb, "pmaxsb", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x3C), INS_Flags_IsDstDstSrcAVXInstruction) // packed maximum signed bytes INST3(pmaxsd, "pmaxsd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x3D), INS_Flags_IsDstDstSrcAVXInstruction) // packed maximum 32-bit signed integers INST3(pmaxuw, "pmaxuw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x3E), INS_Flags_IsDstDstSrcAVXInstruction) // packed maximum 16-bit unsigned integers INST3(pmaxud, "pmaxud", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x3F), INS_Flags_IsDstDstSrcAVXInstruction) // packed maximum 32-bit unsigned integers INST3(pmovsxbw, "pmovsxbw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x20), INS_FLAGS_None) // Packed sign extend byte to short INST3(pmovsxbd, "pmovsxbd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x21), INS_FLAGS_None) // Packed sign extend byte to int INST3(pmovsxbq, "pmovsxbq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x22), INS_FLAGS_None) // Packed sign extend byte to long INST3(pmovsxwd, "pmovsxwd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x23), INS_FLAGS_None) // Packed sign extend short to int INST3(pmovsxwq, "pmovsxwq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x24), INS_FLAGS_None) // Packed sign extend short to long INST3(pmovsxdq, "pmovsxdq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x25), INS_FLAGS_None) // Packed sign extend int to long INST3(pmovzxbw, "pmovzxbw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x30), INS_FLAGS_None) // Packed zero extend byte to short INST3(pmovzxbd, "pmovzxbd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x31), INS_FLAGS_None) // Packed zero extend byte to intg INST3(pmovzxbq, "pmovzxbq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x32), INS_FLAGS_None) // Packed zero extend byte to lon INST3(pmovzxwd, "pmovzxwd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x33), INS_FLAGS_None) // Packed zero extend short to int INST3(pmovzxwq, "pmovzxwq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x34), INS_FLAGS_None) // Packed zero extend short to long INST3(pmovzxdq, "pmovzxdq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x35), INS_FLAGS_None) // Packed zero extend int to long 
INST3(packusdw, "packusdw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x2B), INS_Flags_IsDstDstSrcAVXInstruction) // Pack (narrow) int to unsigned short with saturation INST3(roundps, "roundps", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x08), INS_FLAGS_None) // Round packed single precision floating-point values INST3(roundss, "roundss", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x0A), INS_Flags_IsDstSrcSrcAVXInstruction) // Round scalar single precision floating-point values INST3(roundpd, "roundpd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x09), INS_FLAGS_None) // Round packed double precision floating-point values INST3(roundsd, "roundsd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x0B), INS_Flags_IsDstSrcSrcAVXInstruction) // Round scalar double precision floating-point values INST3(pmuldq, "pmuldq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x28), INS_Flags_IsDstDstSrcAVXInstruction) // packed multiply 32-bit signed integers and store 64-bit result INST3(blendps, "blendps", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x0C), INS_Flags_IsDstDstSrcAVXInstruction) // Blend Packed Single Precision Floating-Point Values INST3(blendvps, "blendvps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x14), INS_FLAGS_None) // Variable Blend Packed Singles INST3(blendpd, "blendpd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x0D), INS_Flags_IsDstDstSrcAVXInstruction) // Blend Packed Double Precision Floating-Point Values INST3(blendvpd, "blendvpd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x15), INS_FLAGS_None) // Variable Blend Packed Doubles INST3(pblendw, "pblendw", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x0E), INS_Flags_IsDstDstSrcAVXInstruction) // Blend Packed Words INST3(pblendvb, "pblendvb", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x10), INS_FLAGS_None) // Variable Blend Packed Bytes INST3(phaddw, "phaddw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x01), INS_Flags_IsDstDstSrcAVXInstruction) // Packed horizontal add of 16-bit integers INST3(phsubw, "phsubw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x05), INS_Flags_IsDstDstSrcAVXInstruction) // Packed horizontal subtract of 16-bit integers INST3(phsubd, "phsubd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x06), INS_Flags_IsDstDstSrcAVXInstruction) // Packed horizontal subtract of 32-bit integers INST3(phaddsw, "phaddsw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x03), INS_Flags_IsDstDstSrcAVXInstruction) // Packed horizontal add of 16-bit integers with saturation INST3(phsubsw, "phsubsw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x07), INS_Flags_IsDstDstSrcAVXInstruction) // Packed horizontal subtract of 16-bit integers with saturation INST3(lddqu, "lddqu", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0xF0), INS_FLAGS_None) // Load Unaligned integer INST3(movntdqa, "movntdqa", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x2A), INS_FLAGS_None) // Load Double Quadword Non-Temporal Aligned Hint INST3(movddup, "movddup", IUM_WR, BAD_CODE, BAD_CODE, SSEDBL(0x12), INS_FLAGS_None) // Replicate Double FP Values INST3(movsldup, "movsldup", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x12), INS_FLAGS_None) // Replicate even-indexed Single FP Values INST3(movshdup, "movshdup", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0x16), INS_FLAGS_None) // Replicate odd-indexed Single FP Values INST3(phminposuw, "phminposuw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x41), INS_FLAGS_None) // Packed Horizontal Word Minimum INST3(mpsadbw, "mpsadbw", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x42), INS_Flags_IsDstDstSrcAVXInstruction) // Compute Multiple Packed Sums of Absolute Difference INST3(pinsrb, "pinsrb", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x20), INS_Flags_IsDstDstSrcAVXInstruction) // Insert Byte INST3(pinsrd, "pinsrd", IUM_WR, BAD_CODE, 
BAD_CODE, SSE3A(0x22), INS_Flags_IsDstDstSrcAVXInstruction) // Insert Dword INST3(pinsrq, "pinsrq", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x22), INS_Flags_IsDstDstSrcAVXInstruction) // Insert Qword INST3(pextrb, "pextrb", IUM_WR, SSE3A(0x14), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract Byte INST3(pextrd, "pextrd", IUM_WR, SSE3A(0x16), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract Dword INST3(pextrq, "pextrq", IUM_WR, SSE3A(0x16), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract Qword INST3(pextrw_sse41, "pextrw", IUM_WR, SSE3A(0x15), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract Word INST3(extractps, "extractps", IUM_WR, SSE3A(0x17), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract Packed Floating-Point Values //PCLMULQDQ instructions INST3(pclmulqdq, "pclmulqdq" , IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x44), INS_Flags_IsDstDstSrcAVXInstruction) // Perform a carry-less multiplication of two quadwords //AES instructions INST3(aesdec, "aesdec", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xDE), INS_Flags_IsDstDstSrcAVXInstruction) // Perform one round of an AES decryption flow INST3(aesdeclast, "aesdeclast", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xDF), INS_Flags_IsDstDstSrcAVXInstruction) // Perform last round of an AES decryption flow INST3(aesenc, "aesenc", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xDC), INS_Flags_IsDstDstSrcAVXInstruction) // Perform one round of an AES encryption flow INST3(aesenclast, "aesenclast", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xDD), INS_Flags_IsDstDstSrcAVXInstruction) // Perform last round of an AES encryption flow INST3(aesimc, "aesimc", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xDB), INS_FLAGS_None) // Perform the AES InvMixColumn Transformation INST3(aeskeygenassist, "aeskeygenassist", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0xDF), INS_FLAGS_None) // AES Round Key Generation Assist INST3(LAST_SSE_INSTRUCTION, "LAST_SSE_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(FIRST_AVX_INSTRUCTION, "FIRST_AVX_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) // AVX only instructions INST3(vbroadcastss, "broadcastss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x18), INS_FLAGS_None) // Broadcast float value read from memory to entire ymm register INST3(vbroadcastsd, "broadcastsd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x19), INS_FLAGS_None) // Broadcast float value read from memory to entire ymm register INST3(vpbroadcastb, "pbroadcastb", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x78), INS_FLAGS_None) // Broadcast int8 value from reg/memory to entire ymm register INST3(vpbroadcastw, "pbroadcastw", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x79), INS_FLAGS_None) // Broadcast int16 value from reg/memory to entire ymm register INST3(vpbroadcastd, "pbroadcastd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x58), INS_FLAGS_None) // Broadcast int32 value from reg/memory to entire ymm register INST3(vpbroadcastq, "pbroadcastq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x59), INS_FLAGS_None) // Broadcast int64 value from reg/memory to entire ymm register INST3(vextractf128, "extractf128", IUM_WR, SSE3A(0x19), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract 128-bit packed floating point values INST3(vextracti128, "extracti128", IUM_WR, SSE3A(0x39), BAD_CODE, BAD_CODE, INS_FLAGS_None) // Extract 128-bit packed integer values INST3(vinsertf128, "insertf128", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x18), INS_Flags_IsDstDstSrcAVXInstruction) // Insert 128-bit packed floating point values INST3(vinserti128, "inserti128", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x38), INS_Flags_IsDstDstSrcAVXInstruction) // Insert 128-bit packed integer 
values INST3(vzeroupper, "zeroupper", IUM_WR, 0xC577F8, BAD_CODE, BAD_CODE, INS_FLAGS_None) // Zero upper 128-bits of all YMM regs (includes 2-byte fixed VEX prefix) INST3(vperm2i128, "perm2i128", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x46), INS_Flags_IsDstDstSrcAVXInstruction) // Permute 128-bit halves of input register INST3(vpermq, "permq", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x00), INS_FLAGS_None) // Permute 64-bit of input register INST3(vpblendd, "pblendd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x02), INS_Flags_IsDstDstSrcAVXInstruction) // Blend Packed DWORDs INST3(vblendvps, "blendvps", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x4A), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Blend Packed Singles INST3(vblendvpd, "blendvpd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x4B), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Blend Packed Doubles INST3(vpblendvb, "pblendvb", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x4C), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Blend Packed Bytes INST3(vtestps, "testps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x0E), INS_FLAGS_None) // Packed Bit Test INST3(vtestpd, "testpd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x0F), INS_FLAGS_None) // Packed Bit Test INST3(vpsrlvd, "psrlvd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x45), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Bit Shift Right Logical INST3(vpsrlvq, "psrlvq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x45), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Bit Shift Right Logical INST3(vpsravd, "psravd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x46), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Bit Shift Right Arithmetic INST3(vpsllvd, "psllvd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x47), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Bit Shift Left Logical INST3(vpsllvq, "psllvq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x47), INS_Flags_IsDstDstSrcAVXInstruction) // Variable Bit Shift Left Logical INST3(vpermilps, "permilps", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x04), INS_FLAGS_None) // Permute In-Lane of Quadruples of Single-Precision Floating-Point Values INST3(vpermilpd, "permilpd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x05), INS_FLAGS_None) // Permute In-Lane of Quadruples of Double-Precision Floating-Point Values INST3(vpermilpsvar, "permilpsvar", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x0C), INS_Flags_IsDstDstSrcAVXInstruction) // Permute In-Lane of Quadruples of Single-Precision Floating-Point Values INST3(vpermilpdvar, "permilpdvar", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x0D), INS_Flags_IsDstDstSrcAVXInstruction) // Permute In-Lane of Quadruples of Double-Precision Floating-Point Values INST3(vperm2f128, "perm2f128", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x06), INS_Flags_IsDstDstSrcAVXInstruction) // Permute Floating-Point Values INST3(vpermpd, "permpd", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0x01), INS_FLAGS_None) // Permute Double-Precision Floating-Point Values INST3(vpermd, "permd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x36), INS_Flags_IsDstDstSrcAVXInstruction) // Permute Packed Doublewords Elements INST3(vpermps, "permps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x16), INS_Flags_IsDstDstSrcAVXInstruction) // Permute Single-Precision Floating-Point Elements INST3(vbroadcastf128, "broadcastf128", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x1A), INS_FLAGS_None) // Broadcast packed float values read from memory to entire ymm register INST3(vbroadcasti128, "broadcasti128", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x5A), INS_FLAGS_None) // Broadcast packed integer values read from memory to entire ymm register INST3(vmaskmovps, "maskmovps", IUM_WR, SSE38(0x2E), BAD_CODE, 
SSE38(0x2C), INS_Flags_IsDstDstSrcAVXInstruction) // Conditional SIMD Packed Single-Precision Floating-Point Loads and Stores INST3(vmaskmovpd, "maskmovpd", IUM_WR, SSE38(0x2F), BAD_CODE, SSE38(0x2D), INS_Flags_IsDstDstSrcAVXInstruction) // Conditional SIMD Packed Double-Precision Floating-Point Loads and Stores INST3(vpmaskmovd, "pmaskmovd", IUM_WR, SSE38(0x8E), BAD_CODE, SSE38(0x8C), INS_Flags_IsDstDstSrcAVXInstruction) // Conditional SIMD Integer Packed Dword Loads and Stores INST3(vpmaskmovq, "pmaskmovq", IUM_WR, SSE38(0x8E), BAD_CODE, SSE38(0x8C), INS_Flags_IsDstDstSrcAVXInstruction) // Conditional SIMD Integer Packed Qword Loads and Stores INST3(vpgatherdd, "pgatherdd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x90), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed Dword Values Using Signed Dword INST3(vpgatherqd, "pgatherqd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x91), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed Dword Values Using Signed Qword INST3(vpgatherdq, "pgatherdq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x90), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed Dword with Signed Dword Indices INST3(vpgatherqq, "pgatherqq", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x91), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed Qword with Signed Dword Indices INST3(vgatherdps, "gatherdps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x92), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed SP FP values Using Signed Dword Indices INST3(vgatherqps, "gatherqps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x93), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed SP FP values Using Signed Qword Indices INST3(vgatherdpd, "gatherdpd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x92), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed DP FP Values Using Signed Dword Indices INST3(vgatherqpd, "gatherqpd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x93), INS_Flags_IsDstDstSrcAVXInstruction) // Gather Packed DP FP Values Using Signed Qword Indices INST3(FIRST_FMA_INSTRUCTION, "FIRST_FMA_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) // id nm um mr mi rm flags INST3(vfmadd132pd, "fmadd132pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x98), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Add of Packed Double-Precision Floating-Point Values INST3(vfmadd213pd, "fmadd213pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA8), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd231pd, "fmadd231pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB8), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd132ps, "fmadd132ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x98), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Add of Packed Single-Precision Floating-Point Values INST3(vfmadd213ps, "fmadd213ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA8), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd231ps, "fmadd231ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB8), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd132sd, "fmadd132sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x99), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Add of Scalar Double-Precision Floating-Point Values INST3(vfmadd213sd, "fmadd213sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA9), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd231sd, "fmadd231sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB9), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd132ss, "fmadd132ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x99), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Add of Scalar Single-Precision Floating-Point Values INST3(vfmadd213ss, "fmadd213ss", IUM_WR, BAD_CODE, 
BAD_CODE, SSE38(0xA9), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmadd231ss, "fmadd231ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB9), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmaddsub132pd, "fmaddsub132pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x96), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values INST3(vfmaddsub213pd, "fmaddsub213pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA6), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmaddsub231pd, "fmaddsub231pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB6), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmaddsub132ps, "fmaddsub132ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x96), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values INST3(vfmaddsub213ps, "fmaddsub213ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA6), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmaddsub231ps, "fmaddsub231ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB6), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsubadd132pd, "fmsubadd132pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x97), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values INST3(vfmsubadd213pd, "fmsubadd213pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA7), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsubadd231pd, "fmsubadd231pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB7), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsubadd132ps, "fmsubadd132ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x97), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values INST3(vfmsubadd213ps, "fmsubadd213ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xA7), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsubadd231ps, "fmsubadd231ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xB7), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub132pd, "fmsub132pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9A), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values INST3(vfmsub213pd, "fmsub213pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAA), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub231pd, "fmsub231pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBA), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub132ps, "fmsub132ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9A), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values INST3(vfmsub213ps, "fmsub213ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAA), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub231ps, "fmsub231ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBA), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub132sd, "fmsub132sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9B), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values INST3(vfmsub213sd, "fmsub213sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAB), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub231sd, "fmsub231sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBB), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub132ss, "fmsub132ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9B), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values INST3(vfmsub213ss, "fmsub213ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAB), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfmsub231ss, "fmsub231ss", 
IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBB), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd132pd, "fnmadd132pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9C), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values INST3(vfnmadd213pd, "fnmadd213pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAC), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd231pd, "fnmadd231pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBC), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd132ps, "fnmadd132ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9C), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values INST3(vfnmadd213ps, "fnmadd213ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAC), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd231ps, "fnmadd231ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBC), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd132sd, "fnmadd132sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9D), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values INST3(vfnmadd213sd, "fnmadd213sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAD), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd231sd, "fnmadd231sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBD), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd132ss, "fnmadd132ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9D), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values INST3(vfnmadd213ss, "fnmadd213ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAD), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmadd231ss, "fnmadd231ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBD), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub132pd, "fnmsub132pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9E), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values INST3(vfnmsub213pd, "fnmsub213pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAE), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub231pd, "fnmsub231pd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBE), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub132ps, "fnmsub132ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9E), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values INST3(vfnmsub213ps, "fnmsub213ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAE), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub231ps, "fnmsub231ps", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBE), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub132sd, "fnmsub132sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9F), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values INST3(vfnmsub213sd, "fnmsub213sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAF), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub231sd, "fnmsub231sd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBF), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub132ss, "fnmsub132ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x9F), INS_Flags_IsDstDstSrcAVXInstruction) // Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values INST3(vfnmsub213ss, "fnmsub213ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xAF), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(vfnmsub231ss, "fnmsub231ss", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xBF), INS_Flags_IsDstDstSrcAVXInstruction) // INST3(LAST_FMA_INSTRUCTION, "LAST_FMA_INSTRUCTION", 
IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(FIRST_AVXVNNI_INSTRUCTION, "FIRST_AVXVNNI_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(vpdpbusd, "pdpbusd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x50), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply and Add Unsigned and Signed Bytes INST3(vpdpwssd, "pdpwssd", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x52), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply and Add Signed Word Integers INST3(vpdpbusds, "pdpbusds", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x51), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply and Add Unsigned and Signed Bytes with Saturation INST3(vpdpwssds, "pdpwssds", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0x53), INS_Flags_IsDstDstSrcAVXInstruction) // Multiply and Add Signed Word Integers with Saturation INST3(LAST_AVXVNNI_INSTRUCTION, "LAST_AVXVNNI_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) // BMI1 INST3(FIRST_BMI_INSTRUCTION, "FIRST_BMI_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(andn, "andn", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF2), Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Undefined_PF | Resets_CF | INS_Flags_IsDstDstSrcAVXInstruction) // Logical AND NOT INST3(blsi, "blsi", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF3), Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_Flags_IsDstDstSrcAVXInstruction) // Extract Lowest Set Isolated Bit INST3(blsmsk, "blsmsk", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF3), INS_Flags_IsDstDstSrcAVXInstruction) // Get Mask Up to Lowest Set Bit INST3(blsr, "blsr", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF3), Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_Flags_IsDstDstSrcAVXInstruction) // Reset Lowest Set Bit INST3(bextr, "bextr", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF7), INS_Flags_IsDstDstSrcAVXInstruction) // Bit Field Extract // BMI2 INST3(rorx, "rorx", IUM_WR, BAD_CODE, BAD_CODE, SSE3A(0xF0), INS_FLAGS_None) INST3(pdep, "pdep", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF5), INS_Flags_IsDstDstSrcAVXInstruction) // Parallel Bits Deposit INST3(pext, "pext", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF5), INS_Flags_IsDstDstSrcAVXInstruction) // Parallel Bits Extract INST3(bzhi, "bzhi", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF5), Resets_OF | Writes_SF | Writes_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_Flags_IsDstDstSrcAVXInstruction) // Zero High Bits Starting with Specified Bit Position INST3(mulx, "mulx", IUM_WR, BAD_CODE, BAD_CODE, SSE38(0xF6), INS_Flags_IsDstDstSrcAVXInstruction) // Unsigned Multiply Without Affecting Flags INST3(LAST_BMI_INSTRUCTION, "LAST_BMI_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) INST3(LAST_AVX_INSTRUCTION, "LAST_AVX_INSTRUCTION", IUM_WR, BAD_CODE, BAD_CODE, BAD_CODE, INS_FLAGS_None) // Scalar instructions in SSE4.2 INST3(crc32, "crc32", IUM_WR, BAD_CODE, BAD_CODE, PACK4(0xF2, 0x0F, 0x38, 0xF0), INS_FLAGS_None) // BMI1 INST3(tzcnt, "tzcnt", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xBC), Undefined_OF | Undefined_SF | Writes_ZF | Undefined_AF | Undefined_PF | Writes_CF ) // Count the Number of Trailing Zero Bits // LZCNT INST3(lzcnt, "lzcnt", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xBD), Undefined_OF | Undefined_SF | Writes_ZF | Undefined_AF | Undefined_PF | Writes_CF ) // POPCNT INST3(popcnt, "popcnt", IUM_WR, BAD_CODE, BAD_CODE, SSEFLT(0xB8), Resets_OF | Resets_SF | Writes_ZF | Resets_AF | Resets_PF | Resets_CF ) // id nm um mr mi flags INST2(ret, "ret", IUM_RD, 0x0000C3, 0x0000C2, INS_FLAGS_None ) INST2(loop, "loop", 
IUM_RD, BAD_CODE, 0x0000E2, INS_FLAGS_None ) INST2(call, "call", IUM_RD, 0x0010FF, 0x0000E8, INS_FLAGS_None ) INST2(rol, "rol", IUM_RW, 0x0000D2, BAD_CODE, Undefined_OF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(rol_1, "rol", IUM_RW, 0x0000D0, 0x0000D0, Writes_OF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(rol_N, "rol", IUM_RW, 0x0000C0, 0x0000C0, Undefined_OF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(ror, "ror", IUM_RW, 0x0008D2, BAD_CODE, Undefined_OF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(ror_1, "ror", IUM_RW, 0x0008D0, 0x0008D0, Writes_OF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(ror_N, "ror", IUM_RW, 0x0008C0, 0x0008C0, Undefined_OF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(rcl, "rcl", IUM_RW, 0x0010D2, BAD_CODE, Undefined_OF | Writes_CF | Reads_CF | INS_FLAGS_Has_Wbit ) INST2(rcl_1, "rcl", IUM_RW, 0x0010D0, 0x0010D0, Writes_OF | Writes_CF | Reads_CF | INS_FLAGS_Has_Wbit ) INST2(rcl_N, "rcl", IUM_RW, 0x0010C0, 0x0010C0, Undefined_OF | Writes_CF | Reads_CF | INS_FLAGS_Has_Wbit ) INST2(rcr, "rcr", IUM_RW, 0x0018D2, BAD_CODE, Undefined_OF | Writes_CF | Reads_CF | INS_FLAGS_Has_Wbit ) INST2(rcr_1, "rcr", IUM_RW, 0x0018D0, 0x0018D0, Writes_OF | Writes_CF | Reads_CF | INS_FLAGS_Has_Wbit ) INST2(rcr_N, "rcr", IUM_RW, 0x0018C0, 0x0018C0, Undefined_OF | Writes_CF | Reads_CF | INS_FLAGS_Has_Wbit ) INST2(shl, "shl", IUM_RW, 0x0020D2, BAD_CODE, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(shl_1, "shl", IUM_RW, 0x0020D0, 0x0020D0, Writes_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(shl_N, "shl", IUM_RW, 0x0020C0, 0x0020C0, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(shr, "shr", IUM_RW, 0x0028D2, BAD_CODE, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(shr_1, "shr", IUM_RW, 0x0028D0, 0x0028D0, Writes_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(shr_N, "shr", IUM_RW, 0x0028C0, 0x0028C0, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(sar, "sar", IUM_RW, 0x0038D2, BAD_CODE, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(sar_1, "sar", IUM_RW, 0x0038D0, 0x0038D0, Writes_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST2(sar_N, "sar", IUM_RW, 0x0038C0, 0x0038C0, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) // id nm um mr flags INST1(r_movsb, "rep movsb", IUM_RD, 0x00A4F3, Reads_DF | INS_FLAGS_Has_Wbit ) INST1(r_movsd, "rep movsd", IUM_RD, 0x00A5F3, Reads_DF | INS_FLAGS_Has_Wbit ) #if defined(TARGET_AMD64) INST1(r_movsq, "rep movsq", IUM_RD, 0xF3A548, Reads_DF ) #endif // defined(TARGET_AMD64) INST1(movsb, "movsb", IUM_RD, 0x0000A4, Reads_DF | INS_FLAGS_Has_Wbit ) INST1(movsd, "movsd", IUM_RD, 0x0000A5, Reads_DF | INS_FLAGS_Has_Wbit ) #if defined(TARGET_AMD64) INST1(movsq, "movsq", IUM_RD, 0x00A548, Reads_DF ) #endif // defined(TARGET_AMD64) INST1(r_stosb, "rep stosb", IUM_RD, 0x00AAF3, Reads_DF | INS_FLAGS_Has_Wbit ) INST1(r_stosd, "rep stosd", IUM_RD, 0x00ABF3, Reads_DF | INS_FLAGS_Has_Wbit ) #if defined(TARGET_AMD64) INST1(r_stosq, "rep stosq", IUM_RD, 0xF3AB48, Reads_DF ) #endif // defined(TARGET_AMD64) INST1(stosb, "stosb", IUM_RD, 0x0000AA, Reads_DF | INS_FLAGS_Has_Wbit ) INST1(stosd, "stosd", IUM_RD, 0x0000AB, Reads_DF | 
INS_FLAGS_Has_Wbit ) #if defined(TARGET_AMD64) INST1(stosq, "stosq", IUM_RD, 0x00AB48, Reads_DF ) #endif // defined(TARGET_AMD64) INST1(int3, "int3", IUM_RD, 0x0000CC, INS_FLAGS_None ) INST1(nop, "nop", IUM_RD, 0x000090, INS_FLAGS_None ) INST1(pause, "pause", IUM_RD, 0x0090F3, INS_FLAGS_None ) INST1(lock, "lock", IUM_RD, 0x0000F0, INS_FLAGS_None ) INST1(leave, "leave", IUM_RD, 0x0000C9, INS_FLAGS_None ) INST1(neg, "neg", IUM_RW, 0x0018F6, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST1(not, "not", IUM_RW, 0x0010F6, INS_FLAGS_None | INS_FLAGS_Has_Wbit ) INST1(cwde, "cwde", IUM_RD, 0x000098, INS_FLAGS_None ) INST1(cdq, "cdq", IUM_RD, 0x000099, INS_FLAGS_None ) INST1(idiv, "idiv", IUM_RD, 0x0038F6, Undefined_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Undefined_CF | INS_FLAGS_Has_Wbit ) INST1(imulEAX, "imul", IUM_RD, 0x0028F6, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST1(div, "div", IUM_RD, 0x0030F6, Undefined_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Undefined_CF | INS_FLAGS_Has_Wbit ) INST1(mulEAX, "mul", IUM_RD, 0x0020F6, Writes_OF | Undefined_SF | Undefined_ZF | Undefined_AF | Undefined_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST1(sahf, "sahf", IUM_RD, 0x00009E, Restore_SF_ZF_AF_PF_CF ) INST1(xadd, "xadd", IUM_RW, 0x0F00C0, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST1(cmpxchg, "cmpxchg", IUM_RW, 0x0F00B0, Writes_OF | Writes_SF | Writes_ZF | Writes_AF | Writes_PF | Writes_CF | INS_FLAGS_Has_Wbit ) INST1(shld, "shld", IUM_RW, 0x0F00A4, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF ) INST1(shrd, "shrd", IUM_RW, 0x0F00AC, Undefined_OF | Writes_SF | Writes_ZF | Undefined_AF | Writes_PF | Writes_CF ) // For RyuJIT/x86, we follow the x86 calling convention that requires // us to return floating point value on the x87 FP stack, so we need // these instructions regardless of whether we're using full stack fp. #ifdef TARGET_X86 INST1(fld, "fld", IUM_WR, 0x0000D9, INS_FLAGS_x87Instr) INST1(fstp, "fstp", IUM_WR, 0x0018D9, INS_FLAGS_x87Instr) #endif // TARGET_X86 INST1(seto, "seto", IUM_WR, 0x0F0090, Reads_OF ) INST1(setno, "setno", IUM_WR, 0x0F0091, Reads_OF ) INST1(setb, "setb", IUM_WR, 0x0F0092, Reads_CF ) INST1(setae, "setae", IUM_WR, 0x0F0093, Reads_CF ) INST1(sete, "sete", IUM_WR, 0x0F0094, Reads_ZF ) INST1(setne, "setne", IUM_WR, 0x0F0095, Reads_ZF ) INST1(setbe, "setbe", IUM_WR, 0x0F0096, Reads_ZF | Reads_CF ) INST1(seta, "seta", IUM_WR, 0x0F0097, Reads_ZF | Reads_CF ) INST1(sets, "sets", IUM_WR, 0x0F0098, Reads_SF ) INST1(setns, "setns", IUM_WR, 0x0F0099, Reads_SF ) INST1(setp, "setp", IUM_WR, 0x0F009A, Reads_PF ) INST1(setnp, "setnp", IUM_WR, 0x0F009B, Reads_PF ) INST1(setl, "setl", IUM_WR, 0x0F009C, Reads_OF | Reads_SF ) INST1(setge, "setge", IUM_WR, 0x0F009D, Reads_OF | Reads_SF ) INST1(setle, "setle", IUM_WR, 0x0F009E, Reads_OF | Reads_SF | Reads_ZF ) INST1(setg, "setg", IUM_WR, 0x0F009F, Reads_OF | Reads_SF | Reads_ZF ) // Indirect jump used for tailcalls. We differentiate between func-internal // indirect jump (e.g. used for switch) and tailcall indirect jumps because the // x64 unwinder might require the latter to be rex.w prefixed. 
INST1(tail_i_jmp, "tail.jmp", IUM_RD, 0x0020FF, INS_FLAGS_None ) INST1(i_jmp, "jmp", IUM_RD, 0x0020FF, INS_FLAGS_None ) INST0(jmp, "jmp", IUM_RD, 0x0000EB, INS_FLAGS_None ) INST0(jo, "jo", IUM_RD, 0x000070, Reads_OF ) INST0(jno, "jno", IUM_RD, 0x000071, Reads_OF ) INST0(jb, "jb", IUM_RD, 0x000072, Reads_CF ) INST0(jae, "jae", IUM_RD, 0x000073, Reads_CF ) INST0(je, "je", IUM_RD, 0x000074, Reads_ZF ) INST0(jne, "jne", IUM_RD, 0x000075, Reads_ZF ) INST0(jbe, "jbe", IUM_RD, 0x000076, Reads_ZF | Reads_CF ) INST0(ja, "ja", IUM_RD, 0x000077, Reads_ZF | Reads_CF ) INST0(js, "js", IUM_RD, 0x000078, Reads_SF ) INST0(jns, "jns", IUM_RD, 0x000079, Reads_SF ) INST0(jp, "jp", IUM_RD, 0x00007A, Reads_PF ) INST0(jnp, "jnp", IUM_RD, 0x00007B, Reads_PF ) INST0(jl, "jl", IUM_RD, 0x00007C, Reads_OF | Reads_SF ) INST0(jge, "jge", IUM_RD, 0x00007D, Reads_OF | Reads_SF ) INST0(jle, "jle", IUM_RD, 0x00007E, Reads_OF | Reads_SF | Reads_ZF ) INST0(jg, "jg", IUM_RD, 0x00007F, Reads_OF | Reads_SF | Reads_ZF ) INST0(l_jmp, "jmp", IUM_RD, 0x0000E9, INS_FLAGS_None ) INST0(l_jo, "jo", IUM_RD, 0x00800F, Reads_OF ) INST0(l_jno, "jno", IUM_RD, 0x00810F, Reads_OF ) INST0(l_jb, "jb", IUM_RD, 0x00820F, Reads_CF ) INST0(l_jae, "jae", IUM_RD, 0x00830F, Reads_CF ) INST0(l_je, "je", IUM_RD, 0x00840F, Reads_ZF ) INST0(l_jne, "jne", IUM_RD, 0x00850F, Reads_ZF ) INST0(l_jbe, "jbe", IUM_RD, 0x00860F, Reads_ZF | Reads_CF ) INST0(l_ja, "ja", IUM_RD, 0x00870F, Reads_ZF | Reads_CF ) INST0(l_js, "js", IUM_RD, 0x00880F, Reads_SF ) INST0(l_jns, "jns", IUM_RD, 0x00890F, Reads_SF ) INST0(l_jp, "jp", IUM_RD, 0x008A0F, Reads_PF ) INST0(l_jnp, "jnp", IUM_RD, 0x008B0F, Reads_PF ) INST0(l_jl, "jl", IUM_RD, 0x008C0F, Reads_OF | Reads_SF ) INST0(l_jge, "jge", IUM_RD, 0x008D0F, Reads_OF | Reads_SF ) INST0(l_jle, "jle", IUM_RD, 0x008E0F, Reads_OF | Reads_SF | Reads_ZF ) INST0(l_jg, "jg", IUM_RD, 0x008F0F, Reads_OF | Reads_SF | Reads_ZF ) INST0(align, "align", IUM_RD, BAD_CODE, INS_FLAGS_None) /*****************************************************************************/ #undef INST0 #undef INST1 #undef INST2 #undef INST3 #undef INST4 #undef INST5 /*****************************************************************************/ // clang-format on
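As an aside on the BMI1 rows in the table above (andn, blsi, blsmsk, blsr): each corresponds to a one-line scalar bit trick, which is what lets a JIT pattern-match ordinary integer expressions onto them — the `blsi` pull request recorded below does exactly that for `AND(x, NEG(x))`. A hedged C# sketch of the identities; the class and method names are invented for illustration:

```csharp
// Scalar identities behind the BMI1 table entries (illustrative sketch only).
static class Bmi1Identities
{
    public static uint AndNot(uint x, uint y)        => ~x & y;       // andn:   logical AND NOT
    public static uint ExtractLowestSetBit(uint x)   => x & (0u - x); // blsi:   isolate lowest set bit
    public static uint MaskUpToLowestSetBit(uint x)  => x ^ (x - 1);  // blsmsk: mask up to lowest set bit
    public static uint ResetLowestSetBit(uint x)     => x & (x - 1);  // blsr:   clear lowest set bit
}
```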
1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
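For context on the description above, the source shape that yields an `AND(x, NEG(x))` tree is the ordinary lowest-set-bit idiom. The snippet below is a hypothetical example, not code from the PR; the class and method names are invented for illustration:

```csharp
static class BlsiExample
{
    // The importer produces AND(x, NEG(x)) for the expression below; with this PR the
    // lowering maps that pattern to the ExtractLowestSetBit hwintrinsic, which is
    // emitted as a single blsi instruction when BMI1 is available.
    public static int LowestSetBit(int x) => x & -x;

    // Roughly the explicit-intrinsic equivalent (real API in System.Runtime.Intrinsics.X86):
    //   Bmi1.ExtractLowestSetBit((uint)x)
}
```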
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/jit/lower.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Lower XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #ifndef _LOWER_H_ #define _LOWER_H_ #include "compiler.h" #include "phase.h" #include "lsra.h" #include "sideeffects.h" class Lowering final : public Phase { public: inline Lowering(Compiler* compiler, LinearScanInterface* lsra) : Phase(compiler, PHASE_LOWERING), vtableCallTemp(BAD_VAR_NUM) { m_lsra = (LinearScan*)lsra; assert(m_lsra); } virtual PhaseStatus DoPhase() override; // This variant of LowerRange is called from outside of the main Lowering pass, // so it creates its own instance of Lowering to do so. void LowerRange(BasicBlock* block, LIR::ReadOnlyRange& range) { Lowering lowerer(comp, m_lsra); lowerer.m_block = block; lowerer.LowerRange(range); } private: // LowerRange handles new code that is introduced by or after Lowering. void LowerRange(LIR::ReadOnlyRange& range) { for (GenTree* newNode : range) { LowerNode(newNode); } } void LowerRange(GenTree* firstNode, GenTree* lastNode) { LIR::ReadOnlyRange range(firstNode, lastNode); LowerRange(range); } // ContainCheckRange handles new code that is introduced by or after Lowering, // and that is known to be already in Lowered form. void ContainCheckRange(LIR::ReadOnlyRange& range) { for (GenTree* newNode : range) { ContainCheckNode(newNode); } } void ContainCheckRange(GenTree* firstNode, GenTree* lastNode) { LIR::ReadOnlyRange range(firstNode, lastNode); ContainCheckRange(range); } void InsertTreeBeforeAndContainCheck(GenTree* insertionPoint, GenTree* tree) { LIR::Range range = LIR::SeqTree(comp, tree); ContainCheckRange(range); BlockRange().InsertBefore(insertionPoint, std::move(range)); } void ContainCheckNode(GenTree* node); void ContainCheckDivOrMod(GenTreeOp* node); void ContainCheckReturnTrap(GenTreeOp* node); void ContainCheckArrOffset(GenTreeArrOffs* node); void ContainCheckLclHeap(GenTreeOp* node); void ContainCheckRet(GenTreeUnOp* ret); void ContainCheckJTrue(GenTreeOp* node); void ContainCheckBitCast(GenTree* node); void ContainCheckCallOperands(GenTreeCall* call); void ContainCheckIndir(GenTreeIndir* indirNode); void ContainCheckStoreIndir(GenTreeStoreInd* indirNode); void ContainCheckMul(GenTreeOp* node); void ContainCheckShiftRotate(GenTreeOp* node); void ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const; void ContainCheckCast(GenTreeCast* node); void ContainCheckCompare(GenTreeOp* node); void ContainCheckBinary(GenTreeOp* node); void ContainCheckBoundsChk(GenTreeBoundsChk* node); #ifdef TARGET_XARCH void ContainCheckFloatBinary(GenTreeOp* node); void ContainCheckIntrinsic(GenTreeOp* node); #endif // TARGET_XARCH #ifdef FEATURE_SIMD void ContainCheckSIMD(GenTreeSIMD* simdNode); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS void ContainCheckHWIntrinsicAddr(GenTreeHWIntrinsic* node, GenTree* addr); void ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node); #endif // FEATURE_HW_INTRINSICS #ifdef DEBUG static void CheckCallArg(GenTree* arg); static void CheckCall(GenTreeCall* call); static void CheckNode(Compiler* compiler, GenTree* node); static bool 
CheckBlock(Compiler* compiler, BasicBlock* block); #endif // DEBUG void LowerBlock(BasicBlock* block); GenTree* LowerNode(GenTree* node); bool IsInvariantInRange(GenTree* node, GenTree* endExclusive); // ------------------------------ // Call Lowering // ------------------------------ void LowerCall(GenTree* call); void LowerCFGCall(GenTreeCall* call); void MoveCFGCallArg(GenTreeCall* call, GenTree* node); #ifndef TARGET_64BIT GenTree* DecomposeLongCompare(GenTree* cmp); #endif GenTree* OptimizeConstCompare(GenTree* cmp); GenTree* LowerCompare(GenTree* cmp); GenTree* LowerJTrue(GenTreeOp* jtrue); GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition); void LowerJmpMethod(GenTree* jmp); void LowerRet(GenTreeUnOp* ret); void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar); void LowerRetStruct(GenTreeUnOp* ret); void LowerRetSingleRegStructLclVar(GenTreeUnOp* ret); void LowerCallStruct(GenTreeCall* call); void LowerStoreSingleRegCallStruct(GenTreeBlk* store); #if !defined(WINDOWS_AMD64_ABI) GenTreeLclVar* SpillStructCallResult(GenTreeCall* call) const; #endif // WINDOWS_AMD64_ABI GenTree* LowerDelegateInvoke(GenTreeCall* call); GenTree* LowerIndirectNonvirtCall(GenTreeCall* call); GenTree* LowerDirectCall(GenTreeCall* call); GenTree* LowerNonvirtPinvokeCall(GenTreeCall* call); GenTree* LowerTailCallViaJitHelper(GenTreeCall* callNode, GenTree* callTarget); void LowerFastTailCall(GenTreeCall* callNode); void RehomeArgForFastTailCall(unsigned int lclNum, GenTree* insertTempBefore, GenTree* lookForUsesStart, GenTreeCall* callNode); void InsertProfTailCallHook(GenTreeCall* callNode, GenTree* insertionPoint); GenTree* LowerVirtualVtableCall(GenTreeCall* call); GenTree* LowerVirtualStubCall(GenTreeCall* call); void LowerArgsForCall(GenTreeCall* call); void ReplaceArgWithPutArgOrBitcast(GenTree** ppChild, GenTree* newNode); GenTree* NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type); void LowerArg(GenTreeCall* call, GenTree** ppTree); #ifdef TARGET_ARMARCH GenTree* LowerFloatArg(GenTree** pArg, fgArgTabEntry* info); GenTree* LowerFloatArgReg(GenTree* arg, regNumber regNum); #endif void InsertPInvokeCallProlog(GenTreeCall* call); void InsertPInvokeCallEpilog(GenTreeCall* call); void InsertPInvokeMethodProlog(); void InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr)); GenTree* SetGCState(int cns); GenTree* CreateReturnTrapSeq(); enum FrameLinkAction { PushFrame, PopFrame }; GenTree* CreateFrameLinkUpdate(FrameLinkAction); GenTree* AddrGen(ssize_t addr); GenTree* AddrGen(void* addr); GenTree* Ind(GenTree* tree, var_types type = TYP_I_IMPL) { return comp->gtNewOperNode(GT_IND, type, tree); } GenTree* PhysReg(regNumber reg, var_types type = TYP_I_IMPL) { return comp->gtNewPhysRegNode(reg, type); } GenTree* ThisReg(GenTreeCall* call) { return PhysReg(comp->codeGen->genGetThisArgReg(call), TYP_REF); } GenTree* Offset(GenTree* base, unsigned offset) { var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet(); return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, nullptr, 0, offset); } GenTree* OffsetByIndex(GenTree* base, GenTree* index) { var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet(); return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, 0, 0); } GenTree* OffsetByIndexWithScale(GenTree* base, GenTree* index, unsigned scale) { var_types resultType = (base->TypeGet() == TYP_REF) ? 
TYP_BYREF : base->TypeGet(); return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, scale, 0); } // Replace the definition of the given use with a lclVar, allocating a new temp // if 'tempNum' is BAD_VAR_NUM. Returns the LclVar node. GenTreeLclVar* ReplaceWithLclVar(LIR::Use& use, unsigned tempNum = BAD_VAR_NUM) { GenTree* oldUseNode = use.Def(); if ((oldUseNode->gtOper != GT_LCL_VAR) || (tempNum != BAD_VAR_NUM)) { GenTree* assign; use.ReplaceWithLclVar(comp, tempNum, &assign); GenTree* newUseNode = use.Def(); ContainCheckRange(oldUseNode->gtNext, newUseNode); // We need to lower the LclVar and assignment since there may be certain // types or scenarios, such as TYP_SIMD12, that need special handling LowerNode(assign); LowerNode(newUseNode); return newUseNode->AsLclVar(); } return oldUseNode->AsLclVar(); } // return true if this call target is within range of a pc-rel call on the machine bool IsCallTargetInRange(void* addr); #if defined(TARGET_XARCH) GenTree* PreferredRegOptionalOperand(GenTree* tree); // ------------------------------------------------------------------ // SetRegOptionalBinOp - Indicates which of the operands of a bin-op // register requirement is optional. Xarch instruction set allows // either of op1 or op2 of binary operation (e.g. add, mul etc) to be // a memory operand. This routine provides info to register allocator // which of its operands optionally require a register. Lsra might not // allocate a register to RefTypeUse positions of such operands if it // is beneficial. In such a case codegen will treat them as memory // operands. // // Arguments: // tree - Gentree of a binary operation. // isSafeToMarkOp1 True if it's safe to mark op1 as register optional // isSafeToMarkOp2 True if it's safe to mark op2 as register optional // // Returns // The caller is expected to get isSafeToMarkOp1 and isSafeToMarkOp2 // by calling IsSafeToContainMem. // // Note: On xarch at most only one of the operands will be marked as // reg optional, even when both operands could be considered register // optional. void SetRegOptionalForBinOp(GenTree* tree, bool isSafeToMarkOp1, bool isSafeToMarkOp2) { assert(GenTree::OperIsBinary(tree->OperGet())); GenTree* const op1 = tree->gtGetOp1(); GenTree* const op2 = tree->gtGetOp2(); const unsigned operatorSize = genTypeSize(tree->TypeGet()); const bool op1Legal = isSafeToMarkOp1 && tree->OperIsCommutative() && (operatorSize == genTypeSize(op1->TypeGet())); const bool op2Legal = isSafeToMarkOp2 && (operatorSize == genTypeSize(op2->TypeGet())); GenTree* regOptionalOperand = nullptr; if (op1Legal) { regOptionalOperand = op2Legal ? 
PreferredRegOptionalOperand(tree) : op1; } else if (op2Legal) { regOptionalOperand = op2; } if (regOptionalOperand != nullptr) { regOptionalOperand->SetRegOptional(); } } #endif // defined(TARGET_XARCH) // Per tree node member functions void LowerStoreIndirCommon(GenTreeStoreInd* ind); void LowerIndir(GenTreeIndir* ind); void LowerStoreIndir(GenTreeStoreInd* node); GenTree* LowerAdd(GenTreeOp* node); GenTree* LowerMul(GenTreeOp* mul); GenTree* LowerBinaryArithmetic(GenTreeOp* binOp); bool LowerUnsignedDivOrMod(GenTreeOp* divMod); GenTree* LowerConstIntDivOrMod(GenTree* node); GenTree* LowerSignedDivOrMod(GenTree* node); void LowerBlockStore(GenTreeBlk* blkNode); void LowerBlockStoreCommon(GenTreeBlk* blkNode); void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr); void LowerPutArgStk(GenTreePutArgStk* tree); bool TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* parent); bool TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode); GenTree* LowerSwitch(GenTree* node); bool TryLowerSwitchToBitTest( BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue); void LowerCast(GenTree* node); #if !CPU_LOAD_STORE_ARCH bool IsRMWIndirCandidate(GenTree* operand, GenTree* storeInd); bool IsBinOpInRMWStoreInd(GenTree* tree); bool IsRMWMemOpRootedAtStoreInd(GenTree* storeIndTree, GenTree** indirCandidate, GenTree** indirOpSource); bool LowerRMWMemOp(GenTreeIndir* storeInd); #endif void WidenSIMD12IfNecessary(GenTreeLclVarCommon* node); bool CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc); void LowerStoreLoc(GenTreeLclVarCommon* tree); GenTree* LowerArrElem(GenTree* node); void LowerRotate(GenTree* tree); void LowerShift(GenTreeOp* shift); #ifdef FEATURE_SIMD void LowerSIMD(GenTreeSIMD* simdNode); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS void LowerHWIntrinsic(GenTreeHWIntrinsic* node); void LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition); void LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp); void LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node); void LowerHWIntrinsicDot(GenTreeHWIntrinsic* node); #if defined(TARGET_XARCH) void LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node); void LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node); void LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node); void LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node); GenTree* TryLowerAndOpToResetLowestSetBit(GenTreeOp* andNode); GenTree* TryLowerAndOpToAndNot(GenTreeOp* andNode); #elif defined(TARGET_ARM64) bool IsValidConstForMovImm(GenTreeHWIntrinsic* node); void LowerHWIntrinsicFusedMultiplyAddScalar(GenTreeHWIntrinsic* node); #endif // !TARGET_XARCH && !TARGET_ARM64 union VectorConstant { int8_t i8[32]; uint8_t u8[32]; int16_t i16[16]; uint16_t u16[16]; int32_t i32[8]; uint32_t u32[8]; int64_t i64[4]; uint64_t u64[4]; float f32[8]; double f64[4]; }; //---------------------------------------------------------------------------------------------- // VectorConstantIsBroadcastedI64: Check N i64 elements in a constant vector for equality // // Arguments: // vecCns - Constant vector // count - Amount of i64 components to compare // // Returns: // true if N i64 elements of the given vector are equal static bool VectorConstantIsBroadcastedI64(VectorConstant& vecCns, int count) { assert(count >= 1 && count <= 4); for (int i = 1; i < count; i++) { if (vecCns.i64[i] != vecCns.i64[0]) { return false; } } return true; } 
//---------------------------------------------------------------------------------------------- // ProcessArgForHWIntrinsicCreate: Processes an argument for the Lowering::LowerHWIntrinsicCreate method // // Arguments: // arg - The argument to process // argIdx - The index of the argument being processed // vecCns - The vector constant being constructed // baseType - The base type of the vector constant // // Returns: // true if arg was a constant; otherwise, false static bool HandleArgForHWIntrinsicCreate(GenTree* arg, int argIdx, VectorConstant& vecCns, var_types baseType) { switch (baseType) { case TYP_BYTE: case TYP_UBYTE: { if (arg->IsCnsIntOrI()) { vecCns.i8[argIdx] = static_cast<int8_t>(arg->AsIntCon()->gtIconVal); return true; } else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i8[argIdx] == 0); } break; } case TYP_SHORT: case TYP_USHORT: { if (arg->IsCnsIntOrI()) { vecCns.i16[argIdx] = static_cast<int16_t>(arg->AsIntCon()->gtIconVal); return true; } else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i16[argIdx] == 0); } break; } case TYP_INT: case TYP_UINT: { if (arg->IsCnsIntOrI()) { vecCns.i32[argIdx] = static_cast<int32_t>(arg->AsIntCon()->gtIconVal); return true; } else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i32[argIdx] == 0); } break; } case TYP_LONG: case TYP_ULONG: { #if defined(TARGET_64BIT) if (arg->IsCnsIntOrI()) { vecCns.i64[argIdx] = static_cast<int64_t>(arg->AsIntCon()->gtIconVal); return true; } #else if (arg->OperIsLong() && arg->AsOp()->gtOp1->IsCnsIntOrI() && arg->AsOp()->gtOp2->IsCnsIntOrI()) { // 32-bit targets will decompose GT_CNS_LNG into two GT_CNS_INT // We need to reconstruct the 64-bit value in order to handle this INT64 gtLconVal = arg->AsOp()->gtOp2->AsIntCon()->gtIconVal; gtLconVal <<= 32; gtLconVal |= arg->AsOp()->gtOp1->AsIntCon()->gtIconVal; vecCns.i64[argIdx] = gtLconVal; return true; } #endif // TARGET_64BIT else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i64[argIdx] == 0); } break; } case TYP_FLOAT: { if (arg->IsCnsFltOrDbl()) { vecCns.f32[argIdx] = static_cast<float>(arg->AsDblCon()->gtDconVal); return true; } else { // We expect the VectorConstant to have been already zeroed // We check against the i32, rather than f32, to account for -0.0 assert(vecCns.i32[argIdx] == 0); } break; } case TYP_DOUBLE: { if (arg->IsCnsFltOrDbl()) { vecCns.f64[argIdx] = static_cast<double>(arg->AsDblCon()->gtDconVal); return true; } else { // We expect the VectorConstant to have been already zeroed // We check against the i64, rather than f64, to account for -0.0 assert(vecCns.i64[argIdx] == 0); } break; } default: { unreached(); } } return false; } #endif // FEATURE_HW_INTRINSICS //---------------------------------------------------------------------------------------------- // TryRemoveCastIfPresent: Removes op it is a cast operation and the size of its input is at // least the size of expectedType // // Arguments: // expectedType - The expected type of the cast operation input if it is to be removed // op - The tree to remove if it is a cast op whose input is at least the size of expectedType // // Returns: // op if it was not a cast node or if its input is not at least the size of expected type; // Otherwise, it returns the underlying operation that was being casted GenTree* TryRemoveCastIfPresent(var_types expectedType, GenTree* op) { if (!op->OperIs(GT_CAST)) { return op; } GenTree* castOp = op->AsCast()->CastOp(); if 
(genTypeSize(castOp->gtType) >= genTypeSize(expectedType)) { BlockRange().Remove(op); return castOp; } return op; } // Utility functions public: static bool IndirsAreEquivalent(GenTree* pTreeA, GenTree* pTreeB); // return true if 'childNode' is an immediate that can be contained // by the 'parentNode' (i.e. folded into an instruction) // for example small enough and non-relocatable bool IsContainableImmed(GenTree* parentNode, GenTree* childNode) const; // Return true if 'node' is a containable memory op. bool IsContainableMemoryOp(GenTree* node) const { return m_lsra->isContainableMemoryOp(node); } #ifdef FEATURE_HW_INTRINSICS // Tries to get a containable node for a given HWIntrinsic bool TryGetContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode, GenTree** pNode, bool* supportsRegOptional, GenTreeHWIntrinsic* transparentParentNode = nullptr); #endif // FEATURE_HW_INTRINSICS static void TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block); private: static bool NodesAreEquivalentLeaves(GenTree* candidate, GenTree* storeInd); bool AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index); // Makes 'childNode' contained in the 'parentNode' void MakeSrcContained(GenTree* parentNode, GenTree* childNode) const; // Checks and makes 'childNode' contained in the 'parentNode' bool CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode); // Checks for memory conflicts in the instructions between childNode and parentNode, and returns true if childNode // can be contained. bool IsSafeToContainMem(GenTree* parentNode, GenTree* childNode) const; // Similar to above, but allows bypassing a "transparent" parent. bool IsSafeToContainMem(GenTree* grandparentNode, GenTree* parentNode, GenTree* childNode) const; inline LIR::Range& BlockRange() const { return LIR::AsRange(m_block); } // Any tracked lclVar accessed by a LCL_FLD or STORE_LCL_FLD should be marked doNotEnregister. // This method checks, and asserts in the DEBUG case if it is not so marked, // but in the non-DEBUG case (asserts disabled) set the flag so that we don't generate bad code. // This ensures that the local's value is valid on-stack as expected for a *LCL_FLD. void verifyLclFldDoNotEnregister(unsigned lclNum) { LclVarDsc* varDsc = comp->lvaGetDesc(lclNum); // Do a couple of simple checks before setting lvDoNotEnregister. // This may not cover all cases in 'isRegCandidate()' but we don't want to // do an expensive check here. For non-candidates it is not harmful to set lvDoNotEnregister. if (varDsc->lvTracked && !varDsc->lvDoNotEnregister) { assert(!m_lsra->isRegCandidate(varDsc)); comp->lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } } LinearScan* m_lsra; unsigned vtableCallTemp; // local variable we use as a temp for vtable calls mutable SideEffectSet m_scratchSideEffects; // SideEffectSet used for IsSafeToContainMem and isRMWIndirCandidate BasicBlock* m_block; }; #endif // _LOWER_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Lower XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #ifndef _LOWER_H_ #define _LOWER_H_ #include "compiler.h" #include "phase.h" #include "lsra.h" #include "sideeffects.h" class Lowering final : public Phase { public: inline Lowering(Compiler* compiler, LinearScanInterface* lsra) : Phase(compiler, PHASE_LOWERING), vtableCallTemp(BAD_VAR_NUM) { m_lsra = (LinearScan*)lsra; assert(m_lsra); } virtual PhaseStatus DoPhase() override; // This variant of LowerRange is called from outside of the main Lowering pass, // so it creates its own instance of Lowering to do so. void LowerRange(BasicBlock* block, LIR::ReadOnlyRange& range) { Lowering lowerer(comp, m_lsra); lowerer.m_block = block; lowerer.LowerRange(range); } private: // LowerRange handles new code that is introduced by or after Lowering. void LowerRange(LIR::ReadOnlyRange& range) { for (GenTree* newNode : range) { LowerNode(newNode); } } void LowerRange(GenTree* firstNode, GenTree* lastNode) { LIR::ReadOnlyRange range(firstNode, lastNode); LowerRange(range); } // ContainCheckRange handles new code that is introduced by or after Lowering, // and that is known to be already in Lowered form. void ContainCheckRange(LIR::ReadOnlyRange& range) { for (GenTree* newNode : range) { ContainCheckNode(newNode); } } void ContainCheckRange(GenTree* firstNode, GenTree* lastNode) { LIR::ReadOnlyRange range(firstNode, lastNode); ContainCheckRange(range); } void InsertTreeBeforeAndContainCheck(GenTree* insertionPoint, GenTree* tree) { LIR::Range range = LIR::SeqTree(comp, tree); ContainCheckRange(range); BlockRange().InsertBefore(insertionPoint, std::move(range)); } void ContainCheckNode(GenTree* node); void ContainCheckDivOrMod(GenTreeOp* node); void ContainCheckReturnTrap(GenTreeOp* node); void ContainCheckArrOffset(GenTreeArrOffs* node); void ContainCheckLclHeap(GenTreeOp* node); void ContainCheckRet(GenTreeUnOp* ret); void ContainCheckJTrue(GenTreeOp* node); void ContainCheckBitCast(GenTree* node); void ContainCheckCallOperands(GenTreeCall* call); void ContainCheckIndir(GenTreeIndir* indirNode); void ContainCheckStoreIndir(GenTreeStoreInd* indirNode); void ContainCheckMul(GenTreeOp* node); void ContainCheckShiftRotate(GenTreeOp* node); void ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const; void ContainCheckCast(GenTreeCast* node); void ContainCheckCompare(GenTreeOp* node); void ContainCheckBinary(GenTreeOp* node); void ContainCheckBoundsChk(GenTreeBoundsChk* node); #ifdef TARGET_XARCH void ContainCheckFloatBinary(GenTreeOp* node); void ContainCheckIntrinsic(GenTreeOp* node); #endif // TARGET_XARCH #ifdef FEATURE_SIMD void ContainCheckSIMD(GenTreeSIMD* simdNode); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS void ContainCheckHWIntrinsicAddr(GenTreeHWIntrinsic* node, GenTree* addr); void ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node); #endif // FEATURE_HW_INTRINSICS #ifdef DEBUG static void CheckCallArg(GenTree* arg); static void CheckCall(GenTreeCall* call); static void CheckNode(Compiler* compiler, GenTree* node); static bool 
CheckBlock(Compiler* compiler, BasicBlock* block); #endif // DEBUG void LowerBlock(BasicBlock* block); GenTree* LowerNode(GenTree* node); bool IsInvariantInRange(GenTree* node, GenTree* endExclusive); // ------------------------------ // Call Lowering // ------------------------------ void LowerCall(GenTree* call); void LowerCFGCall(GenTreeCall* call); void MoveCFGCallArg(GenTreeCall* call, GenTree* node); #ifndef TARGET_64BIT GenTree* DecomposeLongCompare(GenTree* cmp); #endif GenTree* OptimizeConstCompare(GenTree* cmp); GenTree* LowerCompare(GenTree* cmp); GenTree* LowerJTrue(GenTreeOp* jtrue); GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition); void LowerJmpMethod(GenTree* jmp); void LowerRet(GenTreeUnOp* ret); void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar); void LowerRetStruct(GenTreeUnOp* ret); void LowerRetSingleRegStructLclVar(GenTreeUnOp* ret); void LowerCallStruct(GenTreeCall* call); void LowerStoreSingleRegCallStruct(GenTreeBlk* store); #if !defined(WINDOWS_AMD64_ABI) GenTreeLclVar* SpillStructCallResult(GenTreeCall* call) const; #endif // WINDOWS_AMD64_ABI GenTree* LowerDelegateInvoke(GenTreeCall* call); GenTree* LowerIndirectNonvirtCall(GenTreeCall* call); GenTree* LowerDirectCall(GenTreeCall* call); GenTree* LowerNonvirtPinvokeCall(GenTreeCall* call); GenTree* LowerTailCallViaJitHelper(GenTreeCall* callNode, GenTree* callTarget); void LowerFastTailCall(GenTreeCall* callNode); void RehomeArgForFastTailCall(unsigned int lclNum, GenTree* insertTempBefore, GenTree* lookForUsesStart, GenTreeCall* callNode); void InsertProfTailCallHook(GenTreeCall* callNode, GenTree* insertionPoint); GenTree* LowerVirtualVtableCall(GenTreeCall* call); GenTree* LowerVirtualStubCall(GenTreeCall* call); void LowerArgsForCall(GenTreeCall* call); void ReplaceArgWithPutArgOrBitcast(GenTree** ppChild, GenTree* newNode); GenTree* NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type); void LowerArg(GenTreeCall* call, GenTree** ppTree); #ifdef TARGET_ARMARCH GenTree* LowerFloatArg(GenTree** pArg, fgArgTabEntry* info); GenTree* LowerFloatArgReg(GenTree* arg, regNumber regNum); #endif void InsertPInvokeCallProlog(GenTreeCall* call); void InsertPInvokeCallEpilog(GenTreeCall* call); void InsertPInvokeMethodProlog(); void InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr)); GenTree* SetGCState(int cns); GenTree* CreateReturnTrapSeq(); enum FrameLinkAction { PushFrame, PopFrame }; GenTree* CreateFrameLinkUpdate(FrameLinkAction); GenTree* AddrGen(ssize_t addr); GenTree* AddrGen(void* addr); GenTree* Ind(GenTree* tree, var_types type = TYP_I_IMPL) { return comp->gtNewOperNode(GT_IND, type, tree); } GenTree* PhysReg(regNumber reg, var_types type = TYP_I_IMPL) { return comp->gtNewPhysRegNode(reg, type); } GenTree* ThisReg(GenTreeCall* call) { return PhysReg(comp->codeGen->genGetThisArgReg(call), TYP_REF); } GenTree* Offset(GenTree* base, unsigned offset) { var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet(); return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, nullptr, 0, offset); } GenTree* OffsetByIndex(GenTree* base, GenTree* index) { var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet(); return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, 0, 0); } GenTree* OffsetByIndexWithScale(GenTree* base, GenTree* index, unsigned scale) { var_types resultType = (base->TypeGet() == TYP_REF) ? 
TYP_BYREF : base->TypeGet(); return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, scale, 0); } // Replace the definition of the given use with a lclVar, allocating a new temp // if 'tempNum' is BAD_VAR_NUM. Returns the LclVar node. GenTreeLclVar* ReplaceWithLclVar(LIR::Use& use, unsigned tempNum = BAD_VAR_NUM) { GenTree* oldUseNode = use.Def(); if ((oldUseNode->gtOper != GT_LCL_VAR) || (tempNum != BAD_VAR_NUM)) { GenTree* assign; use.ReplaceWithLclVar(comp, tempNum, &assign); GenTree* newUseNode = use.Def(); ContainCheckRange(oldUseNode->gtNext, newUseNode); // We need to lower the LclVar and assignment since there may be certain // types or scenarios, such as TYP_SIMD12, that need special handling LowerNode(assign); LowerNode(newUseNode); return newUseNode->AsLclVar(); } return oldUseNode->AsLclVar(); } // return true if this call target is within range of a pc-rel call on the machine bool IsCallTargetInRange(void* addr); #if defined(TARGET_XARCH) GenTree* PreferredRegOptionalOperand(GenTree* tree); // ------------------------------------------------------------------ // SetRegOptionalBinOp - Indicates which of the operands of a bin-op // register requirement is optional. Xarch instruction set allows // either of op1 or op2 of binary operation (e.g. add, mul etc) to be // a memory operand. This routine provides info to register allocator // which of its operands optionally require a register. Lsra might not // allocate a register to RefTypeUse positions of such operands if it // is beneficial. In such a case codegen will treat them as memory // operands. // // Arguments: // tree - Gentree of a binary operation. // isSafeToMarkOp1 True if it's safe to mark op1 as register optional // isSafeToMarkOp2 True if it's safe to mark op2 as register optional // // Returns // The caller is expected to get isSafeToMarkOp1 and isSafeToMarkOp2 // by calling IsSafeToContainMem. // // Note: On xarch at most only one of the operands will be marked as // reg optional, even when both operands could be considered register // optional. void SetRegOptionalForBinOp(GenTree* tree, bool isSafeToMarkOp1, bool isSafeToMarkOp2) { assert(GenTree::OperIsBinary(tree->OperGet())); GenTree* const op1 = tree->gtGetOp1(); GenTree* const op2 = tree->gtGetOp2(); const unsigned operatorSize = genTypeSize(tree->TypeGet()); const bool op1Legal = isSafeToMarkOp1 && tree->OperIsCommutative() && (operatorSize == genTypeSize(op1->TypeGet())); const bool op2Legal = isSafeToMarkOp2 && (operatorSize == genTypeSize(op2->TypeGet())); GenTree* regOptionalOperand = nullptr; if (op1Legal) { regOptionalOperand = op2Legal ? 
PreferredRegOptionalOperand(tree) : op1; } else if (op2Legal) { regOptionalOperand = op2; } if (regOptionalOperand != nullptr) { regOptionalOperand->SetRegOptional(); } } #endif // defined(TARGET_XARCH) // Per tree node member functions void LowerStoreIndirCommon(GenTreeStoreInd* ind); void LowerIndir(GenTreeIndir* ind); void LowerStoreIndir(GenTreeStoreInd* node); GenTree* LowerAdd(GenTreeOp* node); GenTree* LowerMul(GenTreeOp* mul); GenTree* LowerBinaryArithmetic(GenTreeOp* binOp); bool LowerUnsignedDivOrMod(GenTreeOp* divMod); GenTree* LowerConstIntDivOrMod(GenTree* node); GenTree* LowerSignedDivOrMod(GenTree* node); void LowerBlockStore(GenTreeBlk* blkNode); void LowerBlockStoreCommon(GenTreeBlk* blkNode); void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr); void LowerPutArgStk(GenTreePutArgStk* tree); bool TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* parent); bool TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode); GenTree* LowerSwitch(GenTree* node); bool TryLowerSwitchToBitTest( BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue); void LowerCast(GenTree* node); #if !CPU_LOAD_STORE_ARCH bool IsRMWIndirCandidate(GenTree* operand, GenTree* storeInd); bool IsBinOpInRMWStoreInd(GenTree* tree); bool IsRMWMemOpRootedAtStoreInd(GenTree* storeIndTree, GenTree** indirCandidate, GenTree** indirOpSource); bool LowerRMWMemOp(GenTreeIndir* storeInd); #endif void WidenSIMD12IfNecessary(GenTreeLclVarCommon* node); bool CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc); void LowerStoreLoc(GenTreeLclVarCommon* tree); GenTree* LowerArrElem(GenTree* node); void LowerRotate(GenTree* tree); void LowerShift(GenTreeOp* shift); #ifdef FEATURE_SIMD void LowerSIMD(GenTreeSIMD* simdNode); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS void LowerHWIntrinsic(GenTreeHWIntrinsic* node); void LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition); void LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp); void LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node); void LowerHWIntrinsicDot(GenTreeHWIntrinsic* node); #if defined(TARGET_XARCH) void LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node); void LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node); void LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node); void LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node); GenTree* TryLowerAndOpToResetLowestSetBit(GenTreeOp* andNode); GenTree* TryLowerAndOpToExtractLowestSetBit(GenTreeOp* andNode); GenTree* TryLowerAndOpToAndNot(GenTreeOp* andNode); #elif defined(TARGET_ARM64) bool IsValidConstForMovImm(GenTreeHWIntrinsic* node); void LowerHWIntrinsicFusedMultiplyAddScalar(GenTreeHWIntrinsic* node); #endif // !TARGET_XARCH && !TARGET_ARM64 union VectorConstant { int8_t i8[32]; uint8_t u8[32]; int16_t i16[16]; uint16_t u16[16]; int32_t i32[8]; uint32_t u32[8]; int64_t i64[4]; uint64_t u64[4]; float f32[8]; double f64[4]; }; //---------------------------------------------------------------------------------------------- // VectorConstantIsBroadcastedI64: Check N i64 elements in a constant vector for equality // // Arguments: // vecCns - Constant vector // count - Amount of i64 components to compare // // Returns: // true if N i64 elements of the given vector are equal static bool VectorConstantIsBroadcastedI64(VectorConstant& vecCns, int count) { assert(count >= 1 && count <= 4); for (int i = 1; i < count; i++) { if (vecCns.i64[i] != 
vecCns.i64[0]) { return false; } } return true; } //---------------------------------------------------------------------------------------------- // ProcessArgForHWIntrinsicCreate: Processes an argument for the Lowering::LowerHWIntrinsicCreate method // // Arguments: // arg - The argument to process // argIdx - The index of the argument being processed // vecCns - The vector constant being constructed // baseType - The base type of the vector constant // // Returns: // true if arg was a constant; otherwise, false static bool HandleArgForHWIntrinsicCreate(GenTree* arg, int argIdx, VectorConstant& vecCns, var_types baseType) { switch (baseType) { case TYP_BYTE: case TYP_UBYTE: { if (arg->IsCnsIntOrI()) { vecCns.i8[argIdx] = static_cast<int8_t>(arg->AsIntCon()->gtIconVal); return true; } else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i8[argIdx] == 0); } break; } case TYP_SHORT: case TYP_USHORT: { if (arg->IsCnsIntOrI()) { vecCns.i16[argIdx] = static_cast<int16_t>(arg->AsIntCon()->gtIconVal); return true; } else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i16[argIdx] == 0); } break; } case TYP_INT: case TYP_UINT: { if (arg->IsCnsIntOrI()) { vecCns.i32[argIdx] = static_cast<int32_t>(arg->AsIntCon()->gtIconVal); return true; } else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i32[argIdx] == 0); } break; } case TYP_LONG: case TYP_ULONG: { #if defined(TARGET_64BIT) if (arg->IsCnsIntOrI()) { vecCns.i64[argIdx] = static_cast<int64_t>(arg->AsIntCon()->gtIconVal); return true; } #else if (arg->OperIsLong() && arg->AsOp()->gtOp1->IsCnsIntOrI() && arg->AsOp()->gtOp2->IsCnsIntOrI()) { // 32-bit targets will decompose GT_CNS_LNG into two GT_CNS_INT // We need to reconstruct the 64-bit value in order to handle this INT64 gtLconVal = arg->AsOp()->gtOp2->AsIntCon()->gtIconVal; gtLconVal <<= 32; gtLconVal |= arg->AsOp()->gtOp1->AsIntCon()->gtIconVal; vecCns.i64[argIdx] = gtLconVal; return true; } #endif // TARGET_64BIT else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i64[argIdx] == 0); } break; } case TYP_FLOAT: { if (arg->IsCnsFltOrDbl()) { vecCns.f32[argIdx] = static_cast<float>(arg->AsDblCon()->gtDconVal); return true; } else { // We expect the VectorConstant to have been already zeroed // We check against the i32, rather than f32, to account for -0.0 assert(vecCns.i32[argIdx] == 0); } break; } case TYP_DOUBLE: { if (arg->IsCnsFltOrDbl()) { vecCns.f64[argIdx] = static_cast<double>(arg->AsDblCon()->gtDconVal); return true; } else { // We expect the VectorConstant to have been already zeroed // We check against the i64, rather than f64, to account for -0.0 assert(vecCns.i64[argIdx] == 0); } break; } default: { unreached(); } } return false; } #endif // FEATURE_HW_INTRINSICS //---------------------------------------------------------------------------------------------- // TryRemoveCastIfPresent: Removes op it is a cast operation and the size of its input is at // least the size of expectedType // // Arguments: // expectedType - The expected type of the cast operation input if it is to be removed // op - The tree to remove if it is a cast op whose input is at least the size of expectedType // // Returns: // op if it was not a cast node or if its input is not at least the size of expected type; // Otherwise, it returns the underlying operation that was being casted GenTree* TryRemoveCastIfPresent(var_types expectedType, GenTree* op) { if (!op->OperIs(GT_CAST)) { return op; } 
GenTree* castOp = op->AsCast()->CastOp(); if (genTypeSize(castOp->gtType) >= genTypeSize(expectedType)) { BlockRange().Remove(op); return castOp; } return op; } // Utility functions public: static bool IndirsAreEquivalent(GenTree* pTreeA, GenTree* pTreeB); // return true if 'childNode' is an immediate that can be contained // by the 'parentNode' (i.e. folded into an instruction) // for example small enough and non-relocatable bool IsContainableImmed(GenTree* parentNode, GenTree* childNode) const; // Return true if 'node' is a containable memory op. bool IsContainableMemoryOp(GenTree* node) const { return m_lsra->isContainableMemoryOp(node); } #ifdef FEATURE_HW_INTRINSICS // Tries to get a containable node for a given HWIntrinsic bool TryGetContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode, GenTree** pNode, bool* supportsRegOptional, GenTreeHWIntrinsic* transparentParentNode = nullptr); #endif // FEATURE_HW_INTRINSICS static void TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block); private: static bool NodesAreEquivalentLeaves(GenTree* candidate, GenTree* storeInd); bool AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index); // Makes 'childNode' contained in the 'parentNode' void MakeSrcContained(GenTree* parentNode, GenTree* childNode) const; // Checks and makes 'childNode' contained in the 'parentNode' bool CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode); // Checks for memory conflicts in the instructions between childNode and parentNode, and returns true if childNode // can be contained. bool IsSafeToContainMem(GenTree* parentNode, GenTree* childNode) const; // Similar to above, but allows bypassing a "transparent" parent. bool IsSafeToContainMem(GenTree* grandparentNode, GenTree* parentNode, GenTree* childNode) const; inline LIR::Range& BlockRange() const { return LIR::AsRange(m_block); } // Any tracked lclVar accessed by a LCL_FLD or STORE_LCL_FLD should be marked doNotEnregister. // This method checks, and asserts in the DEBUG case if it is not so marked, // but in the non-DEBUG case (asserts disabled) set the flag so that we don't generate bad code. // This ensures that the local's value is valid on-stack as expected for a *LCL_FLD. void verifyLclFldDoNotEnregister(unsigned lclNum) { LclVarDsc* varDsc = comp->lvaGetDesc(lclNum); // Do a couple of simple checks before setting lvDoNotEnregister. // This may not cover all cases in 'isRegCandidate()' but we don't want to // do an expensive check here. For non-candidates it is not harmful to set lvDoNotEnregister. if (varDsc->lvTracked && !varDsc->lvDoNotEnregister) { assert(!m_lsra->isRegCandidate(varDsc)); comp->lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } } LinearScan* m_lsra; unsigned vtableCallTemp; // local variable we use as a temp for vtable calls mutable SideEffectSet m_scratchSideEffects; // SideEffectSet used for IsSafeToContainMem and isRMWIndirCandidate BasicBlock* m_block; }; #endif // _LOWER_H_
1
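The change to lower.h above adds a declaration of `TryLowerAndOpToExtractLowestSetBit` alongside the existing `TryLowerAndOpToResetLowestSetBit` and `TryLowerAndOpToAndNot` helpers. As a hedged sketch of the source-level patterns those three helpers target (assuming a BMI1-capable CPU; the mapping comments are illustrative and not taken from the PR):

```csharp
using System;

class Bmi1Patterns
{
    // Source-level shapes corresponding to the three TryLowerAndOpTo* helpers
    // declared in lower.h. The instruction mappings assume BMI1 support.
    static uint ResetLowestSetBit(uint x)   => x & (x - 1);       // AND(x, ADD(x, -1)) -> blsr
    static uint ExtractLowestSetBit(uint x) => x & (uint)-(int)x; // AND(x, NEG(x))     -> blsi (the lowering added by this PR)
    static uint AndNot(uint x, uint y)      => x & ~y;            // AND(x, NOT(y))     -> andn

    static void Main()
    {
        Console.WriteLine(ResetLowestSetBit(0b10110000));     // 160
        Console.WriteLine(ExtractLowestSetBit(0b10110000));   // 16
        Console.WriteLine(AndNot(0b11110000, 0b00110000));    // 192
    }
}
```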
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/jit/lowerxarch.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Lowering for AMD64, x86 XX XX XX XX This encapsulates all the logic for lowering trees for the AMD64 XX XX architecture. For a more detailed view of what is lowering, please XX XX take a look at Lower.cpp XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #ifdef TARGET_XARCH // This file is only used for xarch #include "jit.h" #include "sideeffects.h" #include "lower.h" // xarch supports both ROL and ROR instructions so no lowering is required. void Lowering::LowerRotate(GenTree* tree) { ContainCheckShiftRotate(tree->AsOp()); } //------------------------------------------------------------------------ // LowerStoreLoc: Lower a store of a lclVar // // Arguments: // storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR) // // Notes: // This involves: // - Handling of contained immediates. // - Widening operations of unsigneds. void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc) { // Try to widen the ops if they are going into a local var. if ((storeLoc->gtOper == GT_STORE_LCL_VAR) && (storeLoc->gtOp1->gtOper == GT_CNS_INT)) { GenTreeIntCon* con = storeLoc->gtOp1->AsIntCon(); ssize_t ival = con->gtIconVal; LclVarDsc* varDsc = comp->lvaGetDesc(storeLoc); if (varDsc->lvIsSIMDType()) { noway_assert(storeLoc->gtType != TYP_STRUCT); } unsigned size = genTypeSize(storeLoc); // If we are storing a constant into a local variable // we extend the size of the store here if ((size < 4) && !varTypeIsStruct(varDsc)) { if (!varTypeIsUnsigned(varDsc)) { if (genTypeSize(storeLoc) == 1) { if ((ival & 0x7f) != ival) { ival = ival | 0xffffff00; } } else { assert(genTypeSize(storeLoc) == 2); if ((ival & 0x7fff) != ival) { ival = ival | 0xffff0000; } } } // A local stack slot is at least 4 bytes in size, regardless of // what the local var is typed as, so auto-promote it here // unless it is a field of a promoted struct // TODO-XArch-CQ: if the field is promoted shouldn't we also be able to do this? if (!varDsc->lvIsStructField) { storeLoc->gtType = TYP_INT; con->SetIconValue(ival); } } } if (storeLoc->OperIs(GT_STORE_LCL_FLD)) { // We should only encounter this for lclVars that are lvDoNotEnregister. verifyLclFldDoNotEnregister(storeLoc->GetLclNum()); } ContainCheckStoreLoc(storeLoc); } //------------------------------------------------------------------------ // LowerStoreIndir: Determine addressing mode for an indirection, and whether operands are contained. // // Arguments: // node - The indirect store node (GT_STORE_IND) of interest // // Return Value: // None. // void Lowering::LowerStoreIndir(GenTreeStoreInd* node) { // Mark all GT_STOREIND nodes to indicate that it is not known // whether it represents a RMW memory op. node->SetRMWStatusDefault(); if (!varTypeIsFloating(node)) { // Perform recognition of trees with the following structure: // StoreInd(addr, BinOp(expr, GT_IND(addr))) // to be able to fold this into an instruction of the form // BINOP [addr], register // where register is the actual place where 'expr' is computed. // // SSE2 doesn't support RMW form of instructions. 
if (LowerRMWMemOp(node)) { return; } } // Optimization: do not unnecessarily zero-extend the result of setcc. if (varTypeIsByte(node) && (node->Data()->OperIsCompare() || node->Data()->OperIs(GT_SETCC))) { node->Data()->ChangeType(TYP_BYTE); } ContainCheckStoreIndir(node); } //------------------------------------------------------------------------ // LowerMul: Lower a GT_MUL/GT_MULHI/GT_MUL_LONG node. // // Currently only performs containment checks. // // Arguments: // mul - The node to lower // // Return Value: // The next node to lower. // GenTree* Lowering::LowerMul(GenTreeOp* mul) { assert(mul->OperIsMul()); ContainCheckMul(mul); return mul->gtNext; } //------------------------------------------------------------------------ // LowerBinaryArithmetic: lowers the given binary arithmetic node. // // Recognizes opportunities for using target-independent "combined" nodes // Performs containment checks. // // Arguments: // node - the arithmetic node to lower // // Returns: // The next node to lower. // GenTree* Lowering::LowerBinaryArithmetic(GenTreeOp* binOp) { #ifdef FEATURE_HW_INTRINSICS if (comp->opts.OptimizationEnabled() && binOp->OperIs(GT_AND) && varTypeIsIntegral(binOp)) { GenTree* replacementNode = TryLowerAndOpToAndNot(binOp); if (replacementNode != nullptr) { return replacementNode->gtNext; } replacementNode = TryLowerAndOpToResetLowestSetBit(binOp); if (replacementNode != nullptr) { return replacementNode->gtNext; } } #endif ContainCheckBinary(binOp); return binOp->gtNext; } //------------------------------------------------------------------------ // LowerBlockStore: Lower a block store node // // Arguments: // blkNode - The block store node to lower // void Lowering::LowerBlockStore(GenTreeBlk* blkNode) { TryCreateAddrMode(blkNode->Addr(), false, blkNode); GenTree* dstAddr = blkNode->Addr(); GenTree* src = blkNode->Data(); unsigned size = blkNode->Size(); if (blkNode->OperIsInitBlkOp()) { if (src->OperIs(GT_INIT_VAL)) { src->SetContained(); src = src->AsUnOp()->gtGetOp1(); } if (blkNode->OperIs(GT_STORE_OBJ)) { blkNode->SetOper(GT_STORE_BLK); } if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (size <= INITBLK_UNROLL_LIMIT)) { if (!src->OperIs(GT_CNS_INT)) { // TODO-CQ: We could unroll even when the initialization value is not a constant // by inserting a MUL init, 0x01010101 instruction. We need to determine if the // extra latency that MUL introduces isn't worse that rep stosb. Likely not. blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindRepInstr; } else { blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll; // The fill value of an initblk is interpreted to hold a // value of (unsigned int8) however a constant of any size // may practically reside on the evaluation stack. So extract // the lower byte out of the initVal constant and replicate // it to a larger constant whose size is sufficient to support // the largest width store of the desired inline expansion. 
ssize_t fill = src->AsIntCon()->IconValue() & 0xFF; if (fill == 0) { if (size >= XMM_REGSIZE_BYTES) { const bool canUse16BytesSimdMov = !blkNode->IsOnHeapAndContainsReferences(); #ifdef TARGET_AMD64 const bool willUseOnlySimdMov = canUse16BytesSimdMov && (size % XMM_REGSIZE_BYTES == 0); #else const bool willUseOnlySimdMov = (size % 8 == 0); #endif if (willUseOnlySimdMov) { src->SetContained(); } } } #ifdef TARGET_AMD64 else if (size >= REGSIZE_BYTES) { fill *= 0x0101010101010101LL; src->gtType = TYP_LONG; } #endif else { fill *= 0x01010101; } src->AsIntCon()->SetIconValue(fill); ContainBlockStoreAddress(blkNode, size, dstAddr); } } else { #ifdef TARGET_AMD64 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper; #else // TODO-X86-CQ: Investigate whether a helper call would be beneficial on x86 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindRepInstr; #endif } } else { assert(src->OperIs(GT_IND, GT_LCL_VAR, GT_LCL_FLD)); src->SetContained(); if (src->OperIs(GT_IND)) { // TODO-Cleanup: Make sure that GT_IND lowering didn't mark the source address as contained. // Sometimes the GT_IND type is a non-struct type and then GT_IND lowering may contain the // address, not knowing that GT_IND is part of a block op that has containment restrictions. src->AsIndir()->Addr()->ClearContained(); } else if (src->OperIs(GT_LCL_VAR)) { // TODO-1stClassStructs: for now we can't work with STORE_BLOCK source in register. const unsigned srcLclNum = src->AsLclVar()->GetLclNum(); comp->lvaSetVarDoNotEnregister(srcLclNum DEBUGARG(DoNotEnregisterReason::StoreBlkSrc)); } if (blkNode->OperIs(GT_STORE_OBJ)) { if (!blkNode->AsObj()->GetLayout()->HasGCPtr()) { blkNode->SetOper(GT_STORE_BLK); } #ifndef JIT32_GCENCODER else if (dstAddr->OperIsLocalAddr() && (size <= CPBLK_UNROLL_LIMIT)) { // If the size is small enough to unroll then we need to mark the block as non-interruptible // to actually allow unrolling. The generated code does not report GC references loaded in the // temporary register(s) used for copying. // This is not supported for the JIT32_GCENCODER. blkNode->SetOper(GT_STORE_BLK); blkNode->gtBlkOpGcUnsafe = true; } #endif } if (blkNode->OperIs(GT_STORE_OBJ)) { assert((dstAddr->TypeGet() == TYP_BYREF) || (dstAddr->TypeGet() == TYP_I_IMPL)); // If we have a long enough sequence of slots that do not require write barriers then // we can use REP MOVSD/Q instead of a sequence of MOVSD/Q instructions. According to the // Intel Manual, the sweet spot for small structs is between 4 to 12 slots of size where // the entire operation takes 20 cycles and encodes in 5 bytes (loading RCX and REP MOVSD/Q). unsigned nonGCSlots = 0; if (dstAddr->OperIsLocalAddr()) { // If the destination is on the stack then no write barriers are needed. nonGCSlots = blkNode->GetLayout()->GetSlotCount(); } else { // Otherwise a write barrier is needed for every GC pointer in the layout // so we need to check if there's a long enough sequence of non-GC slots. 
ClassLayout* layout = blkNode->GetLayout(); unsigned slots = layout->GetSlotCount(); for (unsigned i = 0; i < slots; i++) { if (layout->IsGCPtr(i)) { nonGCSlots = 0; } else { nonGCSlots++; if (nonGCSlots >= CPOBJ_NONGC_SLOTS_LIMIT) { break; } } } } if (nonGCSlots >= CPOBJ_NONGC_SLOTS_LIMIT) { blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindRepInstr; } else { blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll; } } else if (blkNode->OperIs(GT_STORE_BLK) && (size <= CPBLK_UNROLL_LIMIT)) { blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll; if (src->OperIs(GT_IND)) { ContainBlockStoreAddress(blkNode, size, src->AsIndir()->Addr()); } ContainBlockStoreAddress(blkNode, size, dstAddr); } else { assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK)); #ifdef TARGET_AMD64 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper; #else // TODO-X86-CQ: Investigate whether a helper call would be beneficial on x86 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindRepInstr; #endif } } } //------------------------------------------------------------------------ // ContainBlockStoreAddress: Attempt to contain an address used by an unrolled block store. // // Arguments: // blkNode - the block store node // size - the block size // addr - the address node to try to contain // void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr) { assert(blkNode->OperIs(GT_STORE_BLK) && (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll)); assert(size < INT32_MAX); if (addr->OperIsLocalAddr()) { addr->SetContained(); return; } if (!addr->OperIsAddrMode() && !TryCreateAddrMode(addr, true, blkNode)) { return; } GenTreeAddrMode* addrMode = addr->AsAddrMode(); // On x64 the address mode displacement is signed so it must not exceed INT32_MAX. This check is // an approximation since the last displacement we generate in an unrolled block operation can be // up to 16 bytes lower than offset + size. But offsets large enough to hit this case are likely // to be extremely rare for this to ever be a CQ issue. // On x86 this shouldn't be needed but then again, offsets large enough to hit this are rare. if (addrMode->Offset() > (INT32_MAX - static_cast<int>(size))) { return; } // Note that the parentNode is always the block node, even if we're dealing with the source address. // The source address is not directly used by the block node but by an IND node and that IND node is // always contained. if (!IsSafeToContainMem(blkNode, addrMode)) { return; } addrMode->SetContained(); } //------------------------------------------------------------------------ // LowerPutArgStk: Lower a GT_PUTARG_STK. // // Arguments: // tree - The node of interest // // Return Value: // None. // void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) { GenTree* src = putArgStk->gtGetOp1(); if (src->OperIs(GT_FIELD_LIST)) { #ifdef TARGET_X86 putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Invalid; GenTreeFieldList* fieldList = src->AsFieldList(); // The code generator will push these fields in reverse order by offset. Reorder the list here s.t. the order // of uses is visible to LSRA. assert(fieldList->Uses().IsSorted()); fieldList->Uses().Reverse(); // Now that the fields have been sorted, the kind of code we will generate. 
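        // Illustrative example (informal, hypothetical offsets): fields at offsets { 8, 4, 0 } are each
        // 4-byte aligned and at least 4 bytes below the previously pushed field, so every field counts as
        // a "slot" and Kind::PushAllSlots can be used; a field at offset 6 would fail the alignment check
        // below and force Kind::Push instead.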
bool allFieldsAreSlots = true; unsigned prevOffset = putArgStk->GetStackByteSize(); for (GenTreeFieldList::Use& use : fieldList->Uses()) { GenTree* const fieldNode = use.GetNode(); const unsigned fieldOffset = use.GetOffset(); assert(!fieldNode->TypeIs(TYP_LONG)); // We can treat as a slot any field that is stored at a slot boundary, where the previous // field is not in the same slot. (Note that we store the fields in reverse order.) const bool fieldIsSlot = ((fieldOffset % 4) == 0) && ((prevOffset - fieldOffset) >= 4); if (!fieldIsSlot) { allFieldsAreSlots = false; } // For x86 we must mark all integral fields as contained or reg-optional, and handle them // accordingly in code generation, since we may have up to 8 fields, which cannot all be in // registers to be consumed atomically by the call. if (varTypeIsIntegralOrI(fieldNode)) { if (fieldNode->OperGet() == GT_LCL_VAR) { const LclVarDsc* varDsc = comp->lvaGetDesc(fieldNode->AsLclVarCommon()); if (!varDsc->lvDoNotEnregister) { fieldNode->SetRegOptional(); } else { MakeSrcContained(putArgStk, fieldNode); } } else if (fieldNode->IsIntCnsFitsInI32()) { MakeSrcContained(putArgStk, fieldNode); } else { // For the case where we cannot directly push the value, if we run out of registers, // it would be better to defer computation until we are pushing the arguments rather // than spilling, but this situation is not all that common, as most cases of promoted // structs do not have a large number of fields, and of those most are lclVars or // copy-propagated constants. fieldNode->SetRegOptional(); } } prevOffset = fieldOffset; } // Set the copy kind. // TODO-X86-CQ: Even if we are using push, if there are contiguous floating point fields, we should // adjust the stack once for those fields. The latter is really best done in code generation, but // this tuning should probably be undertaken as a whole. // Also, if there are floating point fields, it may be better to use the "Unroll" mode // of copying the struct as a whole, if the fields are not register candidates. if (allFieldsAreSlots) { putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::PushAllSlots; } else { putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Push; } #endif // TARGET_X86 return; } #ifdef FEATURE_PUT_STRUCT_ARG_STK if (src->TypeGet() != TYP_STRUCT) #endif // FEATURE_PUT_STRUCT_ARG_STK { // If the child of GT_PUTARG_STK is a constant, we don't need a register to // move it to memory (stack location). // // On AMD64, we don't want to make 0 contained, because we can generate smaller code // by zeroing a register and then storing it. E.g.: // xor rdx, rdx // mov gword ptr [rsp+28H], rdx // is 2 bytes smaller than: // mov gword ptr [rsp+28H], 0 // // On x86, we push stack arguments; we don't use 'mov'. So: // push 0 // is 1 byte smaller than: // xor rdx, rdx // push rdx if (IsContainableImmed(putArgStk, src) #if defined(TARGET_AMD64) && !src->IsIntegralConst(0) #endif // TARGET_AMD64 ) { MakeSrcContained(putArgStk, src); } return; } #ifdef FEATURE_PUT_STRUCT_ARG_STK GenTree* srcAddr = nullptr; bool haveLocalAddr = false; if ((src->OperGet() == GT_OBJ) || (src->OperGet() == GT_IND)) { srcAddr = src->AsOp()->gtOp1; assert(srcAddr != nullptr); haveLocalAddr = srcAddr->OperIsLocalAddr(); } else { assert(varTypeIsSIMD(putArgStk)); } ClassLayout* layout = src->AsObj()->GetLayout(); // In case of a CpBlk we could use a helper call. In case of putarg_stk we // can't do that since the helper call could kill some already set up outgoing args. 
// TODO-Amd64-Unix: converge the code for putarg_stk with cpyblk/cpyobj. // The cpyXXXX code is rather complex and this could cause it to be more complex, but // it might be the right thing to do. unsigned size = putArgStk->GetStackByteSize(); // TODO-X86-CQ: The helper call either is not supported on x86 or requires more work // (I don't know which). if (!layout->HasGCPtr()) { #ifdef TARGET_X86 if (size < XMM_REGSIZE_BYTES) { // Codegen for "Kind::Push" will always load bytes in TARGET_POINTER_SIZE // chunks. As such, the correctness of this code depends on the fact that // morph will copy any "mis-sized" (too small) non-local OBJs into a temp, // thus preventing any possible out-of-bounds memory reads. assert(((layout->GetSize() % TARGET_POINTER_SIZE) == 0) || src->OperIsLocalRead() || (src->OperIsIndir() && src->AsIndir()->Addr()->IsLocalAddrExpr())); putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Push; } else #endif // TARGET_X86 if (size <= CPBLK_UNROLL_LIMIT) { putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Unroll; } else { putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::RepInstr; } } else // There are GC pointers. { #ifdef TARGET_X86 // On x86, we must use `push` to store GC references to the stack in order for the emitter to properly update // the function's GC info. These `putargstk` nodes will generate a sequence of `push` instructions. putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Push; #else // !TARGET_X86 putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::PartialRepInstr; #endif // !TARGET_X86 } // Always mark the OBJ and ADDR as contained trees by the putarg_stk. The codegen will deal with this tree. MakeSrcContained(putArgStk, src); if (haveLocalAddr) { // If the source address is the address of a lclVar, make the source address contained to avoid unnecessary // copies. // MakeSrcContained(putArgStk, srcAddr); } #endif // FEATURE_PUT_STRUCT_ARG_STK } /* Lower GT_CAST(srcType, DstType) nodes. * * Casts from small int type to float/double are transformed as follows: * GT_CAST(byte, float/double) = GT_CAST(GT_CAST(byte, int32), float/double) * GT_CAST(sbyte, float/double) = GT_CAST(GT_CAST(sbyte, int32), float/double) * GT_CAST(int16, float/double) = GT_CAST(GT_CAST(int16, int32), float/double) * GT_CAST(uint16, float/double) = GT_CAST(GT_CAST(uint16, int32), float/double) * * SSE2 conversion instructions operate on signed integers. Casts from Uint32/Uint64 * are morphed as follows by front-end and hence should not be seen here. * GT_CAST(uint32, float/double) = GT_CAST(GT_CAST(uint32, long), float/double) * GT_CAST(uint64, float) = GT_CAST(GT_CAST(uint64, double), float) * * * Similarly casts from float/double to a smaller int type are transformed as follows: * GT_CAST(float/double, byte) = GT_CAST(GT_CAST(float/double, int32), byte) * GT_CAST(float/double, sbyte) = GT_CAST(GT_CAST(float/double, int32), sbyte) * GT_CAST(float/double, int16) = GT_CAST(GT_CAST(float/double, int32), int16) * GT_CAST(float/double, uint16) = GT_CAST(GT_CAST(float/double, int32), uint16) * * SSE2 has instructions to convert a float/double value into a signed 32/64-bit * integer. The above transformations help us to leverage those instructions. * * Note that for the following conversions we still depend on helper calls and * don't expect to see them here. * i) GT_CAST(float/double, uint64) * ii) GT_CAST(float/double, int type with overflow detection) * * TODO-XArch-CQ: (Low-pri): Jit64 generates in-line code of 8 instructions for (i) above. 
* There are hardly any occurrences of this conversion operation in platform * assemblies or in CQ perf benchmarks (1 occurrence in corelib, microsoft.jscript, * 1 occurrence in Roslyn and no occurrences in system, system.core, system.numerics, * system.windows.forms, scimark, fractals, bio mums). If we ever find evidence that * doing this optimization is a win, we should consider generating in-lined code. */ void Lowering::LowerCast(GenTree* tree) { assert(tree->OperGet() == GT_CAST); GenTree* castOp = tree->AsCast()->CastOp(); var_types castToType = tree->CastToType(); var_types srcType = castOp->TypeGet(); var_types tmpType = TYP_UNDEF; // force the srcType to unsigned if GT_UNSIGNED flag is set if (tree->gtFlags & GTF_UNSIGNED) { srcType = varTypeToUnsigned(srcType); } // We should never see the following casts as they are expected to be lowered // appropriately or converted into helper calls by front-end. // srcType = float/double castToType = * and overflow detecting cast // Reason: must be converted to a helper call // srcType = float/double, castToType = ulong // Reason: must be converted to a helper call // srcType = uint castToType = float/double // Reason: uint -> float/double = uint -> long -> float/double // srcType = ulong castToType = float // Reason: ulong -> float = ulong -> double -> float if (varTypeIsFloating(srcType)) { noway_assert(!tree->gtOverflow()); noway_assert(castToType != TYP_ULONG); } else if (srcType == TYP_UINT) { noway_assert(!varTypeIsFloating(castToType)); } else if (srcType == TYP_ULONG) { noway_assert(castToType != TYP_FLOAT); } // Case of src is a small type and dst is a floating point type. if (varTypeIsSmall(srcType) && varTypeIsFloating(castToType)) { // These conversions can never be overflow detecting ones. noway_assert(!tree->gtOverflow()); tmpType = TYP_INT; } // Case of src is a floating point type and dst is a small type. else if (varTypeIsFloating(srcType) && varTypeIsSmall(castToType)) { tmpType = TYP_INT; } if (tmpType != TYP_UNDEF) { GenTree* tmp = comp->gtNewCastNode(tmpType, castOp, tree->IsUnsigned(), tmpType); tmp->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->gtFlags &= ~GTF_UNSIGNED; tree->AsOp()->gtOp1 = tmp; BlockRange().InsertAfter(castOp, tmp); ContainCheckCast(tmp->AsCast()); } // Now determine if we have operands that should be contained. ContainCheckCast(tree->AsCast()); } #ifdef FEATURE_SIMD //---------------------------------------------------------------------------------------------- // Lowering::LowerSIMD: Perform containment analysis for a SIMD intrinsic node. // // Arguments: // simdNode - The SIMD intrinsic node. 
// void Lowering::LowerSIMD(GenTreeSIMD* simdNode) { if (simdNode->TypeGet() == TYP_SIMD12) { // GT_SIMD node requiring to produce TYP_SIMD12 in fact // produces a TYP_SIMD16 result simdNode->gtType = TYP_SIMD16; } if (simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicInitN) { assert(simdNode->GetSimdBaseType() == TYP_FLOAT); size_t argCount = simdNode->GetOperandCount(); size_t constArgCount = 0; float constArgValues[4]{0, 0, 0, 0}; for (GenTree* arg : simdNode->Operands()) { assert(arg->TypeIs(simdNode->GetSimdBaseType())); if (arg->IsCnsFltOrDbl()) { constArgValues[constArgCount] = static_cast<float>(arg->AsDblCon()->gtDconVal); constArgCount++; } } if (constArgCount == argCount) { for (GenTree* arg : simdNode->Operands()) { BlockRange().Remove(arg); } assert(sizeof(constArgValues) == 16); unsigned cnsSize = sizeof(constArgValues); unsigned cnsAlign = (comp->compCodeOpt() != Compiler::SMALL_CODE) ? cnsSize : 1; CORINFO_FIELD_HANDLE hnd = comp->GetEmitter()->emitBlkConst(constArgValues, cnsSize, cnsAlign, simdNode->GetSimdBaseType()); GenTree* clsVarAddr = new (comp, GT_CLS_VAR_ADDR) GenTreeClsVar(GT_CLS_VAR_ADDR, TYP_I_IMPL, hnd, nullptr); BlockRange().InsertBefore(simdNode, clsVarAddr); simdNode->ChangeOper(GT_IND); simdNode->AsOp()->gtOp1 = clsVarAddr; ContainCheckIndir(simdNode->AsIndir()); return; } } ContainCheckSIMD(simdNode); } #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS //---------------------------------------------------------------------------------------------- // LowerHWIntrinsicCC: Lowers a hardware intrinsic node that produces a boolean value by // setting the condition flags. // // Arguments: // node - The hardware intrinsic node // newIntrinsicId - The intrinsic id of the lowered intrinsic node // condition - The condition code of the generated SETCC/JCC node // void Lowering::LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition) { GenTreeCC* cc = LowerNodeCC(node, condition); assert(HWIntrinsicInfo::lookupNumArgs(newIntrinsicId) == 2); node->ChangeHWIntrinsicId(newIntrinsicId); node->gtType = TYP_VOID; node->ClearUnusedValue(); bool swapOperands = false; bool canSwapOperands = false; switch (newIntrinsicId) { case NI_SSE_COMISS: case NI_SSE_UCOMISS: case NI_SSE2_COMISD: case NI_SSE2_UCOMISD: // In some cases we can generate better code if we swap the operands: // - If the condition is not one of the "preferred" floating point conditions we can swap // the operands and change the condition to avoid generating an extra JP/JNP branch. // - If the first operand can be contained but the second cannot, we can swap operands in // order to be able to contain the first operand and avoid the need for a temp reg. // We can't handle both situations at the same time and since an extra branch is likely to // be worse than an extra temp reg (x64 has a reasonable number of XMM registers) we'll favor // the branch case: // - If the condition is not preferred then swap, even if doing this will later prevent // containment. // - Allow swapping for containment purposes only if this doesn't result in a non-"preferred" // condition being generated. if ((cc != nullptr) && cc->gtCondition.PreferSwap()) { swapOperands = true; } else { canSwapOperands = (cc == nullptr) || !GenCondition::Swap(cc->gtCondition).PreferSwap(); } break; case NI_SSE41_PTEST: case NI_AVX_PTEST: // If we need the Carry flag then we can't swap operands. 
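            // Informal reasoning: PTEST computes ZF from the AND of the two operands, which is symmetric,
            // so the EQ/NE conditions tolerate a swap; CF is computed from an AND in which one operand is
            // negated, which is not symmetric, hence the restriction above.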
canSwapOperands = (cc == nullptr) || cc->gtCondition.Is(GenCondition::EQ, GenCondition::NE); break; default: unreached(); } if (canSwapOperands) { bool op1SupportsRegOptional = false; bool op2SupportsRegOptional = false; if (!TryGetContainableHWIntrinsicOp(node, &node->Op(2), &op2SupportsRegOptional) && TryGetContainableHWIntrinsicOp(node, &node->Op(1), &op1SupportsRegOptional)) { // Swap operands if op2 cannot be contained but op1 can. swapOperands = true; } } if (swapOperands) { std::swap(node->Op(1), node->Op(2)); if (cc != nullptr) { cc->gtCondition = GenCondition::Swap(cc->gtCondition); } } } //---------------------------------------------------------------------------------------------- // LowerFusedMultiplyAdd: Changes NI_FMA_MultiplyAddScalar produced by Math(F).FusedMultiplyAdd // to a better FMA intrinsics if there are GT_NEG around in order to eliminate them. // // Arguments: // node - The hardware intrinsic node // // Notes: // Math(F).FusedMultiplyAdd is expanded into NI_FMA_MultiplyAddScalar and // depending on additional GT_NEG nodes around it can be: // // x * y + z -> NI_FMA_MultiplyAddScalar // x * -y + z -> NI_FMA_MultiplyAddNegatedScalar // -x * y + z -> NI_FMA_MultiplyAddNegatedScalar // -x * -y + z -> NI_FMA_MultiplyAddScalar // x * y - z -> NI_FMA_MultiplySubtractScalar // x * -y - z -> NI_FMA_MultiplySubtractNegatedScalar // -x * y - z -> NI_FMA_MultiplySubtractNegatedScalar // -x * -y - z -> NI_FMA_MultiplySubtractScalar // void Lowering::LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node) { assert(node->GetHWIntrinsicId() == NI_FMA_MultiplyAddScalar); GenTreeHWIntrinsic* createScalarOps[3]; for (size_t i = 1; i <= 3; i++) { GenTree* arg = node->Op(i); if (!arg->OperIsHWIntrinsic() || (arg->AsHWIntrinsic()->GetHWIntrinsicId() != NI_Vector128_CreateScalarUnsafe)) { return; } createScalarOps[i - 1] = arg->AsHWIntrinsic(); } GenTree* argX = createScalarOps[0]->Op(1); GenTree* argY = createScalarOps[1]->Op(1); GenTree* argZ = createScalarOps[2]->Op(1); const bool negMul = argX->OperIs(GT_NEG) != argY->OperIs(GT_NEG); if (argX->OperIs(GT_NEG)) { createScalarOps[0]->Op(1) = argX->gtGetOp1(); BlockRange().Remove(argX); } if (argY->OperIs(GT_NEG)) { createScalarOps[1]->Op(1) = argY->gtGetOp1(); BlockRange().Remove(argY); } if (argZ->OperIs(GT_NEG)) { createScalarOps[2]->Op(1) = argZ->gtGetOp1(); BlockRange().Remove(argZ); node->ChangeHWIntrinsicId(negMul ? NI_FMA_MultiplySubtractNegatedScalar : NI_FMA_MultiplySubtractScalar); } else { node->ChangeHWIntrinsicId(negMul ? NI_FMA_MultiplyAddNegatedScalar : NI_FMA_MultiplyAddScalar); } } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsic: Perform containment analysis for a hardware intrinsic node. // // Arguments: // node - The hardware intrinsic node. // void Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node) { if (node->TypeGet() == TYP_SIMD12) { // GT_HWINTRINSIC node requiring to produce TYP_SIMD12 in fact // produces a TYP_SIMD16 result node->gtType = TYP_SIMD16; } NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); switch (intrinsicId) { case NI_Vector128_Create: case NI_Vector256_Create: { // We don't directly support the Vector128.Create or Vector256.Create methods in codegen // and instead lower them to other intrinsic nodes in LowerHWIntrinsicCreate so we expect // that the node is modified to either not be a HWIntrinsic node or that it is no longer // the same intrinsic as when it came in. 
In the case of Vector256.Create, we may lower // it into 2x Vector128.Create intrinsics which themselves are also lowered into other // intrinsics that are not Vector*.Create LowerHWIntrinsicCreate(node); assert(!node->OperIsHWIntrinsic() || (node->GetHWIntrinsicId() != intrinsicId)); LowerNode(node); return; } case NI_Vector128_Dot: case NI_Vector256_Dot: { LowerHWIntrinsicDot(node); return; } case NI_Vector128_GetElement: case NI_Vector256_GetElement: { LowerHWIntrinsicGetElement(node); if ((node->GetHWIntrinsicId() == NI_Vector128_GetElement) || (node->GetHWIntrinsicId() == NI_Vector256_GetElement)) { // Most NI_Vector*_GetElement intrinsics are lowered to // alternative nodes, such as the Extract intrinsics, // which are themselves lowered. // // However, certain types may not have a direct equivalent // in which case we specially handle them directly as GetElement // and want to do the relevant containment checks. break; } return; } case NI_Vector128_WithElement: case NI_Vector256_WithElement: { LowerHWIntrinsicWithElement(node); return; } case NI_Vector128_op_Equality: case NI_Vector256_op_Equality: { LowerHWIntrinsicCmpOp(node, GT_EQ); return; } case NI_Vector128_op_Inequality: case NI_Vector256_op_Inequality: { LowerHWIntrinsicCmpOp(node, GT_NE); return; } case NI_Vector128_ToScalar: case NI_Vector256_ToScalar: { LowerHWIntrinsicToScalar(node); break; } case NI_SSE41_Extract: { if (varTypeIsFloating(node->GetSimdBaseType())) { assert(node->GetSimdBaseType() == TYP_FLOAT); assert(node->GetSimdSize() == 16); GenTree* op2 = node->Op(2); if (!op2->OperIsConst()) { // Extract allows the full range while GetElement only allows // 0-3, so we need to mask the index here so codegen works. GenTree* msk = comp->gtNewIconNode(3, TYP_INT); BlockRange().InsertAfter(op2, msk); GenTree* tmp = comp->gtNewOperNode(GT_AND, TYP_INT, op2, msk); BlockRange().InsertAfter(msk, tmp); LowerNode(tmp); node->Op(2) = tmp; } node->ChangeHWIntrinsicId(NI_Vector128_GetElement); LowerNode(node); } break; } case NI_SSE2_Insert: case NI_SSE41_Insert: case NI_SSE41_X64_Insert: { assert(node->GetOperandCount() == 3); // Insert takes either a 32-bit register or a memory operand. // In either case, only SimdBaseType bits are read and so // widening or narrowing the operand may be unnecessary and it // can just be used directly. node->Op(2) = TryRemoveCastIfPresent(node->GetSimdBaseType(), node->Op(2)); break; } case NI_SSE42_Crc32: { assert(node->GetOperandCount() == 2); // Crc32 takes either a bit register or a memory operand. // In either case, only gtType bits are read and so widening // or narrowing the operand may be unnecessary and it can // just be used directly. 
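            // Informal illustration: if op2 is a cast whose only effect is on bits wider than what this
            // node actually reads, the cast is redundant here; TryRemoveCastIfPresent checks that this is
            // the case (and that dropping the cast is otherwise safe) before using the cast's source
            // operand directly.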
node->Op(2) = TryRemoveCastIfPresent(node->TypeGet(), node->Op(2)); break; } case NI_SSE2_CompareGreaterThan: { if (node->GetSimdBaseType() != TYP_DOUBLE) { assert(varTypeIsIntegral(node->GetSimdBaseType())); break; } FALLTHROUGH; } case NI_SSE_CompareGreaterThan: case NI_SSE_CompareGreaterThanOrEqual: case NI_SSE_CompareNotGreaterThan: case NI_SSE_CompareNotGreaterThanOrEqual: case NI_SSE2_CompareGreaterThanOrEqual: case NI_SSE2_CompareNotGreaterThan: case NI_SSE2_CompareNotGreaterThanOrEqual: { assert((node->GetSimdBaseType() == TYP_FLOAT) || (node->GetSimdBaseType() == TYP_DOUBLE)); if (comp->compOpportunisticallyDependsOn(InstructionSet_AVX)) { break; } // pre-AVX doesn't actually support these intrinsics in hardware so we need to swap the operands around std::swap(node->Op(1), node->Op(2)); break; } case NI_SSE2_CompareLessThan: case NI_SSE42_CompareLessThan: case NI_AVX2_CompareLessThan: { if (node->GetSimdBaseType() == TYP_DOUBLE) { break; } assert(varTypeIsIntegral(node->GetSimdBaseType())); // this isn't actually supported in hardware so we need to swap the operands around std::swap(node->Op(1), node->Op(2)); break; } case NI_SSE_CompareScalarOrderedEqual: LowerHWIntrinsicCC(node, NI_SSE_COMISS, GenCondition::FEQ); break; case NI_SSE_CompareScalarOrderedNotEqual: LowerHWIntrinsicCC(node, NI_SSE_COMISS, GenCondition::FNEU); break; case NI_SSE_CompareScalarOrderedLessThan: LowerHWIntrinsicCC(node, NI_SSE_COMISS, GenCondition::FLT); break; case NI_SSE_CompareScalarOrderedLessThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE_COMISS, GenCondition::FLE); break; case NI_SSE_CompareScalarOrderedGreaterThan: LowerHWIntrinsicCC(node, NI_SSE_COMISS, GenCondition::FGT); break; case NI_SSE_CompareScalarOrderedGreaterThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE_COMISS, GenCondition::FGE); break; case NI_SSE_CompareScalarUnorderedEqual: LowerHWIntrinsicCC(node, NI_SSE_UCOMISS, GenCondition::FEQ); break; case NI_SSE_CompareScalarUnorderedNotEqual: LowerHWIntrinsicCC(node, NI_SSE_UCOMISS, GenCondition::FNEU); break; case NI_SSE_CompareScalarUnorderedLessThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE_UCOMISS, GenCondition::FLE); break; case NI_SSE_CompareScalarUnorderedLessThan: LowerHWIntrinsicCC(node, NI_SSE_UCOMISS, GenCondition::FLT); break; case NI_SSE_CompareScalarUnorderedGreaterThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE_UCOMISS, GenCondition::FGE); break; case NI_SSE_CompareScalarUnorderedGreaterThan: LowerHWIntrinsicCC(node, NI_SSE_UCOMISS, GenCondition::FGT); break; case NI_SSE2_CompareScalarOrderedEqual: LowerHWIntrinsicCC(node, NI_SSE2_COMISD, GenCondition::FEQ); break; case NI_SSE2_CompareScalarOrderedNotEqual: LowerHWIntrinsicCC(node, NI_SSE2_COMISD, GenCondition::FNEU); break; case NI_SSE2_CompareScalarOrderedLessThan: LowerHWIntrinsicCC(node, NI_SSE2_COMISD, GenCondition::FLT); break; case NI_SSE2_CompareScalarOrderedLessThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE2_COMISD, GenCondition::FLE); break; case NI_SSE2_CompareScalarOrderedGreaterThan: LowerHWIntrinsicCC(node, NI_SSE2_COMISD, GenCondition::FGT); break; case NI_SSE2_CompareScalarOrderedGreaterThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE2_COMISD, GenCondition::FGE); break; case NI_SSE2_CompareScalarUnorderedEqual: LowerHWIntrinsicCC(node, NI_SSE2_UCOMISD, GenCondition::FEQ); break; case NI_SSE2_CompareScalarUnorderedNotEqual: LowerHWIntrinsicCC(node, NI_SSE2_UCOMISD, GenCondition::FNEU); break; case NI_SSE2_CompareScalarUnorderedLessThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE2_UCOMISD, GenCondition::FLE); break; case 
NI_SSE2_CompareScalarUnorderedLessThan: LowerHWIntrinsicCC(node, NI_SSE2_UCOMISD, GenCondition::FLT); break; case NI_SSE2_CompareScalarUnorderedGreaterThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE2_UCOMISD, GenCondition::FGE); break; case NI_SSE2_CompareScalarUnorderedGreaterThan: LowerHWIntrinsicCC(node, NI_SSE2_UCOMISD, GenCondition::FGT); break; case NI_SSE41_TestC: LowerHWIntrinsicCC(node, NI_SSE41_PTEST, GenCondition::C); break; case NI_SSE41_TestZ: LowerHWIntrinsicCC(node, NI_SSE41_PTEST, GenCondition::EQ); break; case NI_SSE41_TestNotZAndNotC: LowerHWIntrinsicCC(node, NI_SSE41_PTEST, GenCondition::UGT); break; case NI_AVX_TestC: LowerHWIntrinsicCC(node, NI_AVX_PTEST, GenCondition::C); break; case NI_AVX_TestZ: LowerHWIntrinsicCC(node, NI_AVX_PTEST, GenCondition::EQ); break; case NI_AVX_TestNotZAndNotC: LowerHWIntrinsicCC(node, NI_AVX_PTEST, GenCondition::UGT); break; case NI_FMA_MultiplyAddScalar: LowerFusedMultiplyAdd(node); break; default: break; } ContainCheckHWIntrinsic(node); } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsicCmpOp: Lowers a Vector128 or Vector256 comparison intrinsic // // Arguments: // node - The hardware intrinsic node. // cmpOp - The comparison operation, currently must be GT_EQ or GT_NE // void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); var_types simdType = Compiler::getSIMDTypeForSize(simdSize); assert((intrinsicId == NI_Vector128_op_Equality) || (intrinsicId == NI_Vector128_op_Inequality) || (intrinsicId == NI_Vector256_op_Equality) || (intrinsicId == NI_Vector256_op_Inequality)); assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); assert(node->gtType == TYP_BOOL); assert((cmpOp == GT_EQ) || (cmpOp == GT_NE)); // We have the following (with the appropriate simd size and where the intrinsic could be op_Inequality): // /--* op2 simd // /--* op1 simd // node = * HWINTRINSIC simd T op_Equality GenTree* op1 = node->Op(1); GenTree* op2 = node->Op(2); GenCondition cmpCnd = (cmpOp == GT_EQ) ? GenCondition::EQ : GenCondition::NE; if (op2->IsIntegralConstVector(0) && comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { // On SSE4.1 or higher we can optimize comparisons against zero to // just use PTEST. We can't support it for floating-point, however, // as it has both +0.0 and -0.0 where +0.0 == -0.0 node->Op(1) = op1; BlockRange().Remove(op2); if (op2->AsMultiOp()->GetOperandCount() == 1) { // Some zero vectors are Create/Initialization nodes with a constant zero operand // We should also remove this to avoid dead code assert(op2->AsMultiOp()->Op(1)->IsIntegralConst(0)); BlockRange().Remove(op2->AsMultiOp()->Op(1)); } LIR::Use op1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(op1Use); op1 = node->Op(1); op2 = comp->gtClone(op1); BlockRange().InsertAfter(op1, op2); node->Op(2) = op2; if (simdSize == 32) { // TODO-Review: LowerHWIntrinsicCC resets the id again, so why is this needed? node->ChangeHWIntrinsicId(NI_AVX_TestZ); LowerHWIntrinsicCC(node, NI_AVX_PTEST, cmpCnd); } else { // TODO-Review: LowerHWIntrinsicCC resets the id again, so why is this needed? 
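            // Informal illustration: for something like `v == Vector128<int>.Zero`, op1 is cloned above so
            // the node becomes PTEST(v, v) consumed via a ZF-based condition, avoiding the CompareEqual +
            // MoveMask sequence generated on the general path below.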
node->ChangeHWIntrinsicId(NI_SSE41_TestZ); LowerHWIntrinsicCC(node, NI_SSE41_PTEST, cmpCnd); } return; } NamedIntrinsic cmpIntrinsic; CorInfoType cmpJitType; NamedIntrinsic mskIntrinsic; CorInfoType mskJitType; int mskConstant; switch (simdBaseType) { case TYP_BYTE: case TYP_UBYTE: case TYP_SHORT: case TYP_USHORT: case TYP_INT: case TYP_UINT: { cmpJitType = simdBaseJitType; mskJitType = CORINFO_TYPE_UBYTE; if (simdSize == 32) { cmpIntrinsic = NI_AVX2_CompareEqual; mskIntrinsic = NI_AVX2_MoveMask; mskConstant = -1; } else { assert(simdSize == 16); cmpIntrinsic = NI_SSE2_CompareEqual; mskIntrinsic = NI_SSE2_MoveMask; mskConstant = 0xFFFF; } break; } case TYP_LONG: case TYP_ULONG: { mskJitType = CORINFO_TYPE_UBYTE; if (simdSize == 32) { cmpIntrinsic = NI_AVX2_CompareEqual; cmpJitType = simdBaseJitType; mskIntrinsic = NI_AVX2_MoveMask; mskConstant = -1; } else { assert(simdSize == 16); if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { cmpIntrinsic = NI_SSE41_CompareEqual; cmpJitType = simdBaseJitType; } else { cmpIntrinsic = NI_SSE2_CompareEqual; cmpJitType = CORINFO_TYPE_UINT; } mskIntrinsic = NI_SSE2_MoveMask; mskConstant = 0xFFFF; } break; } case TYP_FLOAT: { cmpJitType = simdBaseJitType; mskJitType = simdBaseJitType; if (simdSize == 32) { cmpIntrinsic = NI_AVX_CompareEqual; mskIntrinsic = NI_AVX_MoveMask; mskConstant = 0xFF; } else { cmpIntrinsic = NI_SSE_CompareEqual; mskIntrinsic = NI_SSE_MoveMask; if (simdSize == 16) { mskConstant = 0xF; } else if (simdSize == 12) { mskConstant = 0x7; } else { assert(simdSize == 8); mskConstant = 0x3; } } break; } case TYP_DOUBLE: { cmpJitType = simdBaseJitType; mskJitType = simdBaseJitType; if (simdSize == 32) { cmpIntrinsic = NI_AVX_CompareEqual; mskIntrinsic = NI_AVX_MoveMask; mskConstant = 0xF; } else { assert(simdSize == 16); cmpIntrinsic = NI_SSE2_CompareEqual; mskIntrinsic = NI_SSE2_MoveMask; mskConstant = 0x3; } break; } default: { unreached(); } } GenTree* cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, cmpIntrinsic, cmpJitType, simdSize); BlockRange().InsertBefore(node, cmp); LowerNode(cmp); GenTree* msk = comp->gtNewSimdHWIntrinsicNode(TYP_INT, cmp, mskIntrinsic, mskJitType, simdSize); BlockRange().InsertAfter(cmp, msk); LowerNode(msk); GenTree* mskCns = comp->gtNewIconNode(mskConstant, TYP_INT); BlockRange().InsertAfter(msk, mskCns); if ((simdBaseType == TYP_FLOAT) && (simdSize < 16)) { // For TYP_SIMD8 and TYP_SIMD12 we need to clear the upper bits and can't assume their value GenTree* tmp = comp->gtNewOperNode(GT_AND, TYP_INT, msk, mskCns); BlockRange().InsertAfter(mskCns, tmp); LowerNode(tmp); msk = tmp; mskCns = comp->gtNewIconNode(mskConstant, TYP_INT); BlockRange().InsertAfter(msk, mskCns); } node->ChangeOper(cmpOp); node->ChangeType(TYP_INT); node->AsOp()->gtOp1 = msk; node->AsOp()->gtOp2 = mskCns; GenTree* cc = LowerNodeCC(node, cmpCnd); node->gtType = TYP_VOID; node->ClearUnusedValue(); LowerNode(node); } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsicCreate: Lowers a Vector128 or Vector256 Create call // // Arguments: // node - The hardware intrinsic node. 
// void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); var_types simdType = node->gtType; CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); VectorConstant vecCns = {}; if ((simdSize == 8) && (simdType == TYP_DOUBLE)) { // TODO-Cleanup: Struct retyping means we have the wrong type here. We need to // manually fix it up so the simdType checks below are correct. simdType = TYP_SIMD8; } assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); GenTree* op1 = node->Op(1); // Spare GenTrees to be used for the lowering logic below // Defined upfront to avoid naming conflicts, etc... GenTree* idx = nullptr; GenTree* tmp1 = nullptr; GenTree* tmp2 = nullptr; GenTree* tmp3 = nullptr; size_t argCnt = node->GetOperandCount(); size_t cnsArgCnt = 0; // These intrinsics are meant to set the same value to every element. if ((argCnt == 1) && HandleArgForHWIntrinsicCreate(node->Op(1), 0, vecCns, simdBaseType)) { // Now assign the rest of the arguments. for (unsigned i = 1; i < simdSize / genTypeSize(simdBaseType); i++) { HandleArgForHWIntrinsicCreate(node->Op(1), i, vecCns, simdBaseType); } cnsArgCnt = 1; } else { for (unsigned i = 1; i <= argCnt; i++) { if (HandleArgForHWIntrinsicCreate(node->Op(i), i - 1, vecCns, simdBaseType)) { cnsArgCnt++; } } } assert((argCnt == 1) || (argCnt == (simdSize / genTypeSize(simdBaseType)))); if (argCnt == cnsArgCnt) { for (GenTree* arg : node->Operands()) { #if !defined(TARGET_64BIT) if (arg->OperIsLong()) { BlockRange().Remove(arg->AsOp()->gtGetOp1()); BlockRange().Remove(arg->AsOp()->gtGetOp2()); } #endif // !TARGET_64BIT BlockRange().Remove(arg); } assert((simdSize == 8) || (simdSize == 12) || (simdSize == 16) || (simdSize == 32)); if (((simdSize == 16) || (simdSize == 32)) && VectorConstantIsBroadcastedI64(vecCns, simdSize / 8)) { // If we are a single constant or if all parts are the same, we might be able to optimize // this even further for certain values, such as Zero or AllBitsSet. if (vecCns.i64[0] == 0) { node->ResetHWIntrinsicId((simdSize == 16) ? NI_Vector128_get_Zero : NI_Vector256_get_Zero); return; } else if (vecCns.i64[0] == -1) { node->ResetHWIntrinsicId((simdSize == 16) ? NI_Vector128_get_AllBitsSet : NI_Vector256_get_AllBitsSet); return; } } unsigned cnsSize = (simdSize != 12) ? simdSize : 16; unsigned cnsAlign = (comp->compCodeOpt() != Compiler::SMALL_CODE) ? cnsSize : emitter::dataSection::MIN_DATA_ALIGN; var_types dataType = Compiler::getSIMDTypeForSize(simdSize); UNATIVE_OFFSET cnum = comp->GetEmitter()->emitDataConst(&vecCns, cnsSize, cnsAlign, dataType); CORINFO_FIELD_HANDLE hnd = comp->eeFindJitDataOffs(cnum); GenTree* clsVarAddr = new (comp, GT_CLS_VAR_ADDR) GenTreeClsVar(GT_CLS_VAR_ADDR, TYP_I_IMPL, hnd, nullptr); BlockRange().InsertBefore(node, clsVarAddr); node->ChangeOper(GT_IND); node->AsOp()->gtOp1 = clsVarAddr; // TODO-XARCH-CQ: We should be able to modify at least the paths that use Insert to trivially support partial // vector constants. With this, we can create a constant if say 50% of the inputs are also constant and just // insert the non-constant values which should still allow some gains. 
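        // Illustrative outcome (informal): a fully constant call such as Vector128.Create(1, 2, 3, 4) has
        // its 16 bytes emitted into the read-only data section above and the node rewritten to
        // GT_IND(CLS_VAR_ADDR), i.e. a single vector load at run time.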
return; } else if (argCnt == 1) { // We have the following (where simd is simd16 or simd32): // /--* op1 T // node = * HWINTRINSIC simd T Create if (intrinsicId == NI_Vector256_Create) { if (comp->compOpportunisticallyDependsOn(InstructionSet_AVX2)) { // We will be constructing the following parts: // /--* op1 T // tmp1 = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* tmp1 simd16 // node = * HWINTRINSIC simd32 T BroadcastScalarToVector256 // This is roughly the following managed code: // var tmp1 = Vector128.CreateScalarUnsafe(op1); // return Avx2.BroadcastScalarToVector256(tmp1); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); node->ResetHWIntrinsicId(NI_AVX2_BroadcastScalarToVector256, tmp1); return; } assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX)); // We will be constructing the following parts: // /--* op1 T // tmp1 = * HWINTRINSIC simd16 T Create // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // /--* tmp2 simd16 // tmp3 = * HWINTRINSIC simd16 T ToVector256Unsafe // idx = CNS_INT int 0 // /--* tmp3 simd32 // +--* tmp1 simd16 // +--* idx int // node = * HWINTRINSIC simd32 T InsertVector128 // This is roughly the following managed code: // var tmp1 = Vector128.Create(op1); // var tmp2 = tmp1; // var tmp3 = tmp2.ToVector256Unsafe(); // return Avx.InsertVector128(tmp3, tmp1, 0x01); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_Create, simdBaseJitType, 16); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); tmp3 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD32, tmp2, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16); BlockRange().InsertAfter(tmp2, tmp3); LowerNode(tmp3); idx = comp->gtNewIconNode(0x01, TYP_INT); BlockRange().InsertAfter(tmp3, idx); node->ResetHWIntrinsicId(NI_AVX_InsertVector128, comp, tmp3, tmp1, idx); return; } // We will be constructing the following parts: // /--* op1 T // tmp1 = * HWINTRINSIC simd16 T CreateScalarUnsafe // ... // This is roughly the following managed code: // var tmp1 = Vector128.CreateScalarUnsafe(op1); // ... tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); if ((simdBaseJitType != CORINFO_TYPE_DOUBLE) && comp->compOpportunisticallyDependsOn(InstructionSet_AVX2)) { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // node = * HWINTRINSIC simd16 T BroadcastScalarToVector128 // This is roughly the following managed code: // ... // return Avx2.BroadcastScalarToVector128(tmp1); node->ChangeHWIntrinsicId(NI_AVX2_BroadcastScalarToVector128, tmp1); return; } switch (simdBaseType) { case TYP_BYTE: case TYP_UBYTE: { if (comp->compOpportunisticallyDependsOn(InstructionSet_SSSE3)) { // We will be constructing the following parts: // ... // tmp2 = HWINTRINSIC simd16 ubyte get_Zero // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 ubyte Shuffle // This is roughly the following managed code: // ... 
// var tmp2 = Vector128<byte>.Zero; // return Ssse3.Shuffle(tmp1, tmp2); tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, NI_Vector128_get_Zero, CORINFO_TYPE_UBYTE, simdSize); BlockRange().InsertAfter(tmp1, tmp2); LowerNode(tmp2); node->ResetHWIntrinsicId(NI_SSSE3_Shuffle, tmp1, tmp2); break; } assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp1 = * HWINTRINSIC simd16 ubyte UnpackLow // ... // This is roughly the following managed code: // ... // var tmp2 = tmp1; // tmp1 = Sse2.UnpackLow(tmp1, tmp2); // ... node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_SSE2_UnpackLow, CORINFO_TYPE_UBYTE, simdSize); BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); FALLTHROUGH; } case TYP_SHORT: case TYP_USHORT: { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp1 = * HWINTRINSIC simd16 ushort UnpackLow // ... // This is roughly the following managed code: // ... // var tmp2 = tmp1; // tmp1 = Sse2.UnpackLow(tmp1, tmp2); // ... assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_SSE2_UnpackLow, CORINFO_TYPE_USHORT, simdSize); BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); FALLTHROUGH; } case TYP_INT: case TYP_UINT: { // We will be constructing the following parts: // ... // idx = CNS_INT int 0 // /--* tmp1 simd16 // +--* idx int // node = * HWINTRINSIC simd16 uint Shuffle // This is roughly the following managed code: // ... // return Sse2.Shuffle(tmp1, 0x00); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); idx = comp->gtNewIconNode(0x00, TYP_INT); BlockRange().InsertAfter(tmp1, idx); node->ResetHWIntrinsicId(NI_SSE2_Shuffle, tmp1, idx); node->SetSimdBaseJitType(CORINFO_TYPE_UINT); break; } #if defined(TARGET_AMD64) case TYP_LONG: case TYP_ULONG: { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 ulong UnpackLow // This is roughly the following managed code: // ... // var tmp2 = tmp1; // return Sse2.UnpackLow(tmp1, tmp2); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); node->ResetHWIntrinsicId(NI_SSE2_UnpackLow, tmp1, tmp2); break; } #endif // TARGET_AMD64 case TYP_FLOAT: { if (comp->compOpportunisticallyDependsOn(InstructionSet_AVX)) { // We will be constructing the following parts: // ... // idx = CNS_INT int 0 // /--* tmp1 simd16 // +--* idx int // node = * HWINTRINSIC simd16 float Permute // This is roughly the following managed code: // ... 
// return Avx.Permute(tmp1, 0x00); idx = comp->gtNewIconNode(0x00, TYP_INT); BlockRange().InsertAfter(tmp1, idx); node->ResetHWIntrinsicId(NI_AVX_Permute, tmp1, idx); break; } // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // idx = CNS_INT int 0 // /--* tmp1 simd16 // +--* tmp2 simd16 // +--* idx int // node = * HWINTRINSIC simd16 float Shuffle // This is roughly the following managed code: // ... // var tmp2 = tmp1; // return Sse.Shuffle(tmp1, tmp2, 0x00); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE)); node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); idx = comp->gtNewIconNode(0x00, TYP_INT); BlockRange().InsertAfter(tmp2, idx); node->ResetHWIntrinsicId(NI_SSE_Shuffle, comp, tmp1, tmp2, idx); break; } case TYP_DOUBLE: { if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE3)) { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // node = * HWINTRINSIC simd16 double MoveAndDuplicate // This is roughly the following managed code: // ... // return Sse3.MoveAndDuplicate(tmp1); node->ChangeHWIntrinsicId(NI_SSE3_MoveAndDuplicate, tmp1); break; } assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 float MoveLowToHigh // This is roughly the following managed code: // ... // var tmp2 = tmp1; // return Sse.MoveLowToHigh(tmp1, tmp2); node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); node->ResetHWIntrinsicId(NI_SSE_MoveLowToHigh, tmp1, tmp2); node->SetSimdBaseJitType(CORINFO_TYPE_FLOAT); break; } default: { unreached(); } } return; } GenTree* op2 = node->Op(2); // We have the following (where simd is simd16 or simd32): // /--* op1 T // +--* ... T // +--* opN T // node = * HWINTRINSIC simd T Create if (intrinsicId == NI_Vector256_Create) { assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX)); // We will be constructing the following parts: // /--* op1 T // +--* ... T // lo = * HWINTRINSIC simd16 T Create // /--* ... T // +--* opN T // hi = * HWINTRINSIC simd16 T Create // idx = CNS_INT int 1 // /--* lo simd32 // +--* hi simd16 // +--* idx int // node = * HWINTRINSIC simd32 T InsertVector128 // This is roughly the following managed code: // ... // var lo = Vector128.Create(op1, ...); // var hi = Vector128.Create(..., opN); // return Avx.InsertVector128(lo, hi, 0x01); // Each Vector128.Create call gets half the operands. 
That is: // lo = Vector128.Create(op1, op2); // hi = Vector128.Create(op3, op4); // -or- // lo = Vector128.Create(op1, ..., op4); // hi = Vector128.Create(op5, ..., op8); // -or- // lo = Vector128.Create(op1, ..., op8); // hi = Vector128.Create(op9, ..., op16); // -or- // lo = Vector128.Create(op1, ..., op16); // hi = Vector128.Create(op17, ..., op32); size_t halfArgCnt = argCnt / 2; assert((halfArgCnt * 2) == argCnt); GenTree* lo = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, node->GetOperandArray(), halfArgCnt, NI_Vector128_Create, simdBaseJitType, 16); BlockRange().InsertAfter(node->Op(halfArgCnt), lo); LowerNode(lo); GenTree* hi = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, node->GetOperandArray(halfArgCnt), halfArgCnt, NI_Vector128_Create, simdBaseJitType, 16); BlockRange().InsertAfter(node->Op(argCnt), hi); LowerNode(hi); idx = comp->gtNewIconNode(0x01, TYP_INT); BlockRange().InsertAfter(hi, idx); assert(argCnt >= 3); node->ResetHWIntrinsicId(NI_AVX_InsertVector128, comp, lo, hi, idx); return; } // We will be constructing the following parts: // /--* op1 T // tmp1 = * HWINTRINSIC simd16 T CreateScalarUnsafe // ... // This is roughly the following managed code: // var tmp1 = Vector128.CreateScalarUnsafe(op1); // ... tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); switch (simdBaseType) { case TYP_BYTE: case TYP_UBYTE: case TYP_SHORT: case TYP_USHORT: case TYP_INT: case TYP_UINT: { unsigned N = 0; GenTree* opN = nullptr; NamedIntrinsic insIntrinsic = NI_Illegal; if ((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT)) { assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); insIntrinsic = NI_SSE2_Insert; } else if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { insIntrinsic = NI_SSE41_Insert; } if (insIntrinsic != NI_Illegal) { for (N = 1; N < argCnt - 1; N++) { // We will be constructing the following parts: // ... // idx = CNS_INT int N // /--* tmp1 simd16 // +--* opN T // +--* idx int // tmp1 = * HWINTRINSIC simd16 T Insert // ... // This is roughly the following managed code: // ... // tmp1 = Sse?.Insert(tmp1, opN, N); // ... opN = node->Op(N + 1); idx = comp->gtNewIconNode(N, TYP_INT); BlockRange().InsertAfter(opN, idx); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, opN, idx, insIntrinsic, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp1); LowerNode(tmp1); } assert(N == (argCnt - 1)); // We will be constructing the following parts: // idx = CNS_INT int N // /--* tmp1 simd16 // +--* opN T // +--* idx int // node = * HWINTRINSIC simd16 T Insert // This is roughly the following managed code: // ... // tmp1 = Sse?.Insert(tmp1, opN, N); // ... opN = node->Op(argCnt); idx = comp->gtNewIconNode(N, TYP_INT); BlockRange().InsertAfter(opN, idx); node->ResetHWIntrinsicId(insIntrinsic, comp, tmp1, opN, idx); break; } assert((simdBaseType != TYP_SHORT) && (simdBaseType != TYP_USHORT)); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); GenTree* op[16]; op[0] = tmp1; for (N = 1; N < argCnt; N++) { opN = node->Op(N + 1); op[N] = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(opN, op[N]); LowerNode(op[N]); } if ((simdBaseType == TYP_BYTE) || (simdBaseType == TYP_UBYTE)) { for (N = 0; N < argCnt; N += 4) { // We will be constructing the following parts: // ... 
// /--* opN T // opN = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opO T // opO = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opN simd16 // +--* opO simd16 // tmp1 = * HWINTRINSIC simd16 T UnpackLow // /--* opP T // opP = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opQ T // opQ = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opP simd16 // +--* opQ simd16 // tmp2 = * HWINTRINSIC simd16 T UnpackLow // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp3 = * HWINTRINSIC simd16 T UnpackLow // ... // This is roughly the following managed code: // ... // tmp1 = Sse2.UnpackLow(opN, opO); // tmp2 = Sse2.UnpackLow(opP, opQ); // tmp3 = Sse2.UnpackLow(tmp1, tmp2); // ... unsigned O = N + 1; unsigned P = N + 2; unsigned Q = N + 3; tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op[N], op[O], NI_SSE2_UnpackLow, CORINFO_TYPE_UBYTE, simdSize); BlockRange().InsertAfter(op[O], tmp1); LowerNode(tmp1); tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, op[P], op[Q], NI_SSE2_UnpackLow, CORINFO_TYPE_UBYTE, simdSize); BlockRange().InsertAfter(op[Q], tmp2); LowerNode(tmp2); tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_SSE2_UnpackLow, CORINFO_TYPE_USHORT, simdSize); BlockRange().InsertAfter(tmp2, tmp3); LowerNode(tmp3); // This caches the result in index 0 through 3, depending on which // loop iteration this is and allows the rest of the logic to be // shared with the TYP_INT and TYP_UINT path. op[N / 4] = tmp3; } } // We will be constructing the following parts: // ... // /--* opN T // opN = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opO T // opO = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opN simd16 // +--* opO simd16 // tmp1 = * HWINTRINSIC simd16 T UnpackLow // /--* opP T // opP = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opQ T // opQ = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opP simd16 // +--* opQ simd16 // tmp2 = * HWINTRINSIC simd16 T UnpackLow // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 T UnpackLow // This is roughly the following managed code: // ... // tmp1 = Sse2.UnpackLow(opN, opO); // tmp2 = Sse2.UnpackLow(opP, opQ); // return Sse2.UnpackLow(tmp1, tmp2); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op[0], op[1], NI_SSE2_UnpackLow, CORINFO_TYPE_UINT, simdSize); BlockRange().InsertAfter(op[1], tmp1); LowerNode(tmp1); tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, op[2], op[3], NI_SSE2_UnpackLow, CORINFO_TYPE_UINT, simdSize); BlockRange().InsertAfter(op[3], tmp2); LowerNode(tmp2); node->ResetHWIntrinsicId(NI_SSE2_UnpackLow, tmp1, tmp2); node->SetSimdBaseJitType(CORINFO_TYPE_ULONG); break; } #if defined(TARGET_AMD64) case TYP_LONG: case TYP_ULONG: { if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE41_X64)) { // We will be constructing the following parts: // ... // idx = CNS_INT int 1 // /--* tmp1 simd16 // +--* op2 T // +--* idx int // node = * HWINTRINSIC simd16 T Insert // This is roughly the following managed code: // ... // return Sse41.X64.Insert(tmp1, op2, 0x01); idx = comp->gtNewIconNode(0x01, TYP_INT); BlockRange().InsertBefore(node, idx); node->ResetHWIntrinsicId(NI_SSE41_X64_Insert, comp, tmp1, op2, idx); break; } // We will be constructing the following parts: // ... // /--* op2 T // tmp2 = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 T UnpackLow // This is roughly the following managed code: // ... 
// var tmp2 = Vector128.CreateScalarUnsafe(op2); // return Sse2.UnpackLow(tmp1, tmp2); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(op2, tmp2); LowerNode(tmp2); node->ResetHWIntrinsicId(NI_SSE2_UnpackLow, tmp1, tmp2); break; } #endif // TARGET_AMD64 case TYP_FLOAT: { unsigned N = 0; GenTree* opN = nullptr; if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { for (N = 1; N < argCnt - 1; N++) { // We will be constructing the following parts: // ... // // /--* opN T // tmp2 = * HWINTRINSIC simd16 T CreateScalarUnsafe // idx = CNS_INT int N // /--* tmp1 simd16 // +--* opN T // +--* idx int // tmp1 = * HWINTRINSIC simd16 T Insert // ... // This is roughly the following managed code: // ... // tmp2 = Vector128.CreateScalarUnsafe(opN); // tmp1 = Sse41.Insert(tmp1, tmp2, N << 4); // ... opN = node->Op(N + 1); tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(opN, tmp2); LowerNode(tmp2); idx = comp->gtNewIconNode(N << 4, TYP_INT); BlockRange().InsertAfter(tmp2, idx); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, idx, NI_SSE41_Insert, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp1); LowerNode(tmp1); } // We will be constructing the following parts: // ... // // /--* opN T // tmp2 = * HWINTRINSIC simd16 T CreateScalarUnsafe // idx = CNS_INT int N // /--* tmp1 simd16 // +--* opN T // +--* idx int // node = * HWINTRINSIC simd16 T Insert // This is roughly the following managed code: // ... // tmp2 = Vector128.CreateScalarUnsafe(opN); // return Sse41.Insert(tmp1, tmp2, N << 4); opN = node->Op(argCnt); tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(opN, tmp2); LowerNode(tmp2); idx = comp->gtNewIconNode((argCnt - 1) << 4, TYP_INT); BlockRange().InsertAfter(tmp2, idx); node->ResetHWIntrinsicId(NI_SSE41_Insert, comp, tmp1, tmp2, idx); break; } // We will be constructing the following parts: // ... // /--* opN T // opN = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opO T // opO = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opN simd16 // +--* opO simd16 // tmp1 = * HWINTRINSIC simd16 T UnpackLow // /--* opP T // opP = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opQ T // opQ = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opP simd16 // +--* opQ simd16 // tmp2 = * HWINTRINSIC simd16 T UnpackLow // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 T MoveLowToHigh // This is roughly the following managed code: // ... 
// tmp1 = Sse.UnpackLow(opN, opO); // tmp2 = Sse.UnpackLow(opP, opQ); // return Sse.MoveLowToHigh(tmp1, tmp2); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE)); GenTree* op[4]; op[0] = tmp1; for (N = 1; N < argCnt; N++) { opN = node->Op(N + 1); op[N] = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(opN, op[N]); LowerNode(op[N]); } tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op[0], op[1], NI_SSE_UnpackLow, simdBaseJitType, simdSize); BlockRange().InsertAfter(op[1], tmp1); LowerNode(tmp1); tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, op[2], op[3], NI_SSE_UnpackLow, simdBaseJitType, simdSize); BlockRange().InsertAfter(op[3], tmp2); LowerNode(tmp2); node->ResetHWIntrinsicId(NI_SSE_MoveLowToHigh, tmp1, tmp2); break; } case TYP_DOUBLE: { // We will be constructing the following parts: // ... // /--* op2 T // tmp2 = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 T MoveLowToHigh // This is roughly the following managed code: // ... // var tmp2 = Vector128.CreateScalarUnsafe(op2); // return Sse.MoveLowToHigh(tmp1, tmp2); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(op2, tmp2); LowerNode(tmp2); node->ResetHWIntrinsicId(NI_SSE_MoveLowToHigh, tmp1, tmp2); node->SetSimdBaseJitType(CORINFO_TYPE_FLOAT); break; } default: { unreached(); } } } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsicGetElement: Lowers a Vector128 or Vector256 GetElement call // // Arguments: // node - The hardware intrinsic node. // void Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); var_types simdType = node->gtType; CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); assert(!varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); GenTree* op1 = node->Op(1); GenTree* op2 = node->Op(2); if (op1->OperIs(GT_IND)) { // If the vector is already in memory, we force its // addr to be evaluated into a reg. This would allow // us to generate [regBase] or [regBase + offset] or // [regBase + sizeOf(simdBaseType) * regIndex] to access // the required vector element directly from memory. // // TODO-CQ-XARCH: If addr of GT_IND is GT_LEA, we // might be able update GT_LEA to fold the regIndex // or offset in some cases. Instead with this // approach we always evaluate GT_LEA into a reg. // Ideally, we should be able to lower GetItem intrinsic // into GT_IND(newAddr) where newAddr combines // the addr of the vector with the given index. op1->gtFlags |= GTF_IND_REQ_ADDR_IN_REG; } if (!op2->OperIsConst()) { // We will specially handle GetElement in codegen when op2 isn't a constant return; } // We should have a bounds check inserted for any index outside the allowed range // but we need to generate some code anyways, and so we'll simply mask here for simplicity. 
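    // Illustrative example (informal): for a Vector128 of int (count == 4), a constant index of 6 is
    // reduced to 6 % 4 == 2 below; the bounds check inserted earlier is what actually throws for an
    // out-of-range index, the masking here just keeps the generated immediate well-formed.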
ssize_t count = simdSize / genTypeSize(simdBaseType); ssize_t imm8 = static_cast<uint8_t>(op2->AsIntCon()->IconValue()) % count; assert(0 <= imm8 && imm8 < count); if (IsContainableMemoryOp(op1) && IsSafeToContainMem(node, op1)) { // We will specially handle GetElement in codegen when op1 is already in memory op2->AsIntCon()->SetIconValue(imm8); return; } switch (simdBaseType) { // Using software fallback if simdBaseType is not supported by hardware case TYP_BYTE: case TYP_UBYTE: case TYP_INT: case TYP_UINT: assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE41)); break; case TYP_LONG: case TYP_ULONG: // We either support TYP_LONG or we have been decomposed into two TYP_INT inserts assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE41_X64)); break; case TYP_DOUBLE: case TYP_FLOAT: case TYP_SHORT: case TYP_USHORT: assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); break; default: unreached(); } // Remove the index node up front to simplify downstream logic BlockRange().Remove(op2); // Spare GenTrees to be used for the lowering logic below // Defined upfront to avoid naming conflicts, etc... GenTree* idx = nullptr; GenTree* tmp1 = nullptr; GenTree* tmp2 = nullptr; if (intrinsicId == NI_Vector256_GetElement) { assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX)); if (imm8 >= count / 2) { // idx = CNS_INT int 1 // /--* op1 simd32 // +--* idx int // op1 = * HWINTRINSIC simd32 T ExtractVector128 // This is roughly the following managed code: // ... // op1 = Avx.ExtractVector128(op1, 0x01); imm8 -= count / 2; idx = comp->gtNewIconNode(1); BlockRange().InsertBefore(node, idx); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, idx, NI_AVX_ExtractVector128, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp1); LowerNode(tmp1); } else { // /--* op1 simd32 // op1 = * HWINTRINSIC simd32 T GetLower // This is roughly the following managed code: // ... 
// op1 = op1.GetLower(); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector256_GetLower, simdBaseJitType, 16); BlockRange().InsertBefore(node, tmp1); LowerNode(tmp1); } op1 = tmp1; } NamedIntrinsic resIntrinsic = NI_Illegal; if (imm8 == 0 && (genTypeSize(simdBaseType) >= 4)) { switch (simdBaseType) { case TYP_LONG: resIntrinsic = NI_SSE2_X64_ConvertToInt64; break; case TYP_ULONG: resIntrinsic = NI_SSE2_X64_ConvertToUInt64; break; case TYP_INT: resIntrinsic = NI_SSE2_ConvertToInt32; break; case TYP_UINT: resIntrinsic = NI_SSE2_ConvertToUInt32; break; case TYP_FLOAT: case TYP_DOUBLE: resIntrinsic = NI_Vector128_ToScalar; break; default: unreached(); } node->ResetHWIntrinsicId(resIntrinsic, op1); } else { op2 = comp->gtNewIconNode(imm8); BlockRange().InsertBefore(node, op2); switch (simdBaseType) { case TYP_LONG: case TYP_ULONG: { resIntrinsic = NI_SSE41_X64_Extract; break; } case TYP_FLOAT: case TYP_DOUBLE: { // We specially handle float and double for more efficient codegen resIntrinsic = NI_Vector128_GetElement; break; } case TYP_BYTE: case TYP_UBYTE: case TYP_INT: case TYP_UINT: { resIntrinsic = NI_SSE41_Extract; break; } case TYP_SHORT: case TYP_USHORT: { resIntrinsic = NI_SSE2_Extract; break; } default: unreached(); } node->ResetHWIntrinsicId(resIntrinsic, op1, op2); } node->SetSimdSize(16); if (!varTypeIsFloating(simdBaseType)) { assert(node->GetHWIntrinsicId() != intrinsicId); LowerNode(node); } if ((simdBaseType == TYP_BYTE) || (simdBaseType == TYP_SHORT)) { // The intrinsic zeros the upper bits, so we need an explicit // cast to ensure the result is properly sign extended LIR::Use use; bool foundUse = BlockRange().TryGetUse(node, &use); GenTreeCast* cast = comp->gtNewCastNode(TYP_INT, node, /* isUnsigned */ true, simdBaseType); BlockRange().InsertAfter(node, cast); if (foundUse) { use.ReplaceWith(cast); } LowerNode(cast); } } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsicWithElement: Lowers a Vector128 or Vector256 WithElement call // // Arguments: // node - The hardware intrinsic node. // void Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); var_types simdType = node->TypeGet(); CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); GenTree* op1 = node->Op(1); GenTree* op2 = node->Op(2); GenTree* op3 = node->Op(3); assert(op2->OperIsConst()); ssize_t imm8 = op2->AsIntCon()->IconValue(); ssize_t cachedImm8 = imm8; ssize_t count = simdSize / genTypeSize(simdBaseType); assert(0 <= imm8 && imm8 < count); switch (simdBaseType) { // Using software fallback if simdBaseType is not supported by hardware case TYP_BYTE: case TYP_UBYTE: case TYP_INT: case TYP_UINT: assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE41)); break; case TYP_LONG: case TYP_ULONG: assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE41_X64)); break; case TYP_DOUBLE: case TYP_FLOAT: case TYP_SHORT: case TYP_USHORT: assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); break; default: unreached(); } // Remove the index node up front to simplify downstream logic BlockRange().Remove(op2); // Spare GenTrees to be used for the lowering logic below // Defined upfront to avoid naming conflicts, etc... 
GenTree* idx = nullptr; GenTree* tmp1 = nullptr; GenTree* tmp2 = nullptr; GenTreeHWIntrinsic* result = node; // If we have a simd32 WithElement, we will spill the original // simd32 source into a local, extract the lower/upper half from // it and then operate on that. At the end, we will insert the simd16 // result back into the simd32 local, producing our final value. if (intrinsicId == NI_Vector256_WithElement) { assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX)); // This copy of "node" will have the simd16 value we need. result = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, intrinsicId, simdBaseJitType, 16); BlockRange().InsertBefore(node, result); // We will be constructing the following parts: // ... // /--* op1 simd32 // * STORE_LCL_VAR simd32 // tmp32 = LCL_VAR simd32 // op1 = LCL_VAR simd32 // TODO-CQ: move the tmp32 node closer to the final InsertVector128. LIR::Use op1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(op1Use); GenTree* tmp32 = node->Op(1); op1 = comp->gtClone(tmp32); BlockRange().InsertBefore(op3, op1); if (imm8 >= count / 2) { // We will be constructing the following parts: // ... // idx = CNS_INT int 1 // /--* op1 simd32 // +--* idx int // op1 = * HWINTRINSIC simd32 T ExtractVector128 // This is roughly the following managed code: // ... // op1 = Avx.ExtractVector128(op1, 0x01); imm8 -= count / 2; idx = comp->gtNewIconNode(1); BlockRange().InsertAfter(op1, idx); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, idx, NI_AVX_ExtractVector128, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp1); LowerNode(tmp1); } else { // We will be constructing the following parts: // ... // /--* op1 simd32 // op1 = * HWINTRINSIC simd32 T GetLower // This is roughly the following managed code: // ... // op1 = op1.GetLower(); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector256_GetLower, simdBaseJitType, simdSize); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); } op1 = tmp1; // Now we will insert our "result" into our simd32 temporary. idx = comp->gtNewIconNode((cachedImm8 >= count / 2) ? 1 : 0); BlockRange().InsertBefore(node, idx); node->ChangeHWIntrinsicId(NI_AVX_InsertVector128, tmp32, result, idx); } switch (simdBaseType) { case TYP_LONG: case TYP_ULONG: { idx = comp->gtNewIconNode(imm8); BlockRange().InsertBefore(result, idx); result->ChangeHWIntrinsicId(NI_SSE41_X64_Insert, op1, op3, idx); break; } case TYP_FLOAT: { // We will be constructing the following parts: // ... // /--* op3 float // tmp1 = * HWINTRINSIC simd16 T CreateScalarUnsafe // This is roughly the following managed code: // ... // tmp1 = Vector128.CreateScalarUnsafe(op3); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op3, NI_Vector128_CreateScalarUnsafe, CORINFO_TYPE_FLOAT, 16); BlockRange().InsertBefore(result, tmp1); LowerNode(tmp1); if (!comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { if (imm8 == 0) { // We will be constructing the following parts: // ... // /--* op1 simd16 // +--* op2 simd16 // node = * HWINTRINSIC simd16 T MoveScalar // This is roughly the following managed code: // ... // node = Sse.MoveScalar(op1, op2); result->ResetHWIntrinsicId(NI_SSE_MoveScalar, op1, tmp1); } else { // We will be constructing the following parts: // ... 
// /--* op1 simd16 // * STORE_LCL_VAR simd16 // op2 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // idx = CNS_INT int 0 // /--* tmp1 simd16 // +--* tmp2 simd16 // +--* idx int // op1 = * HWINTRINSIC simd16 T Shuffle // idx = CNS_INT int 226 // /--* op1 simd16 // +--* tmp2 simd16 // +--* idx int // op1 = * HWINTRINSIC simd16 T Shuffle // This is roughly the following managed code: // ... // tmp2 = Sse.Shuffle(tmp1, op1, 0 or 48 or 32); // node = Sse.Shuffle(tmp2, op1, 226 or 132 or 36); result->Op(1) = op1; LIR::Use op1Use(BlockRange(), &result->Op(1), result); ReplaceWithLclVar(op1Use); op2 = result->Op(1); tmp2 = comp->gtClone(op2); BlockRange().InsertAfter(tmp1, tmp2); ssize_t controlBits1; ssize_t controlBits2; // The comments beside the control bits below are listed using the managed API operands // // In practice, for the first step the value being inserted (op3) is in tmp1 // while the other elements of the result (op1) are in tmp2. The result ends // up containing the value being inserted and its immediate neighbor. // // The second step takes that result (which is in op1) plus the other elements // from op2 (a clone of op1/tmp2 from the previous step) and combines them to // create the final result. switch (imm8) { case 1: { controlBits1 = 0; // 00 00 00 00; op1 = { X = op3, Y = op3, Z = op1.X, W = op1.X } controlBits2 = 226; // 11 10 00 10; node = { X = op1.X, Y = op3, Z = op1.Z, W = op1.W } break; } case 2: { controlBits1 = 15; // 00 00 11 11; op1 = { X = op1.W, Y = op1.W, Z = op3, W = op3 } controlBits2 = 36; // 00 10 01 00; node = { X = op1.X, Y = op1.Y, Z = op3, W = op1.W } break; } case 3: { controlBits1 = 10; // 00 00 10 10; op1 = { X = op1.Z, Y = op1.Z, Z = op3, W = op3 } controlBits2 = 132; // 10 00 01 00; node = { X = op1.X, Y = op1.Y, Z = op1.Z, W = op3 } break; } default: unreached(); } idx = comp->gtNewIconNode(controlBits1); BlockRange().InsertAfter(tmp2, idx); if (imm8 != 1) { std::swap(tmp1, tmp2); } op1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, idx, NI_SSE_Shuffle, CORINFO_TYPE_FLOAT, 16); BlockRange().InsertAfter(idx, op1); LowerNode(op1); idx = comp->gtNewIconNode(controlBits2); BlockRange().InsertAfter(op1, idx); if (imm8 != 1) { std::swap(op1, op2); } result->ChangeHWIntrinsicId(NI_SSE_Shuffle, op1, op2, idx); } break; } else { imm8 = imm8 * 16; op3 = tmp1; FALLTHROUGH; } } case TYP_BYTE: case TYP_UBYTE: case TYP_INT: case TYP_UINT: { idx = comp->gtNewIconNode(imm8); BlockRange().InsertBefore(result, idx); result->ChangeHWIntrinsicId(NI_SSE41_Insert, op1, op3, idx); break; } case TYP_SHORT: case TYP_USHORT: { idx = comp->gtNewIconNode(imm8); BlockRange().InsertBefore(result, idx); result->ChangeHWIntrinsicId(NI_SSE2_Insert, op1, op3, idx); break; } case TYP_DOUBLE: { // We will be constructing the following parts: // ... // /--* op3 double // tmp1 = * HWINTRINSIC simd16 T CreateScalarUnsafe // This is roughly the following managed code: // ... // tmp1 = Vector128.CreateScalarUnsafe(op3); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op3, NI_Vector128_CreateScalarUnsafe, CORINFO_TYPE_DOUBLE, 16); BlockRange().InsertBefore(result, tmp1); LowerNode(tmp1); result->ResetHWIntrinsicId((imm8 == 0) ? NI_SSE2_MoveScalar : NI_SSE2_UnpackLow, op1, tmp1); break; } default: unreached(); } assert(result->GetHWIntrinsicId() != intrinsicId); LowerNode(result); if (intrinsicId == NI_Vector256_WithElement) { // Now that we have finalized the shape of the tree, lower the insertion node as well. 
assert(node->GetHWIntrinsicId() == NI_AVX_InsertVector128); assert(node != result); LowerNode(node); } } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsicDot: Lowers a Vector128 or Vector256 Dot call // // Arguments: // node - The hardware intrinsic node. // void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); var_types simdType = Compiler::getSIMDTypeForSize(simdSize); unsigned simd16Count = comp->getSIMDVectorLength(16, simdBaseType); assert((intrinsicId == NI_Vector128_Dot) || (intrinsicId == NI_Vector256_Dot)); assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); GenTree* op1 = node->Op(1); GenTree* op2 = node->Op(2); // Spare GenTrees to be used for the lowering logic below // Defined upfront to avoid naming conflicts, etc... GenTree* idx = nullptr; GenTree* tmp1 = nullptr; GenTree* tmp2 = nullptr; GenTree* tmp3 = nullptr; NamedIntrinsic multiply = NI_Illegal; NamedIntrinsic horizontalAdd = NI_Illegal; NamedIntrinsic add = NI_Illegal; NamedIntrinsic shuffle = NI_Illegal; if (simdSize == 32) { switch (simdBaseType) { case TYP_SHORT: case TYP_USHORT: case TYP_INT: case TYP_UINT: { assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX2)); multiply = NI_AVX2_MultiplyLow; horizontalAdd = NI_AVX2_HorizontalAdd; add = NI_AVX2_Add; break; } case TYP_FLOAT: { assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX)); // We will be constructing the following parts: // idx = CNS_INT int 0xF1 // /--* op1 simd16 // +--* op2 simd16 // +--* idx int // tmp1 = * HWINTRINSIC simd16 T DotProduct // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // idx = CNS_INT int 0x01 // /--* tmp2 simd16 // +--* idx int // tmp2 = * HWINTRINSIC simd16 T ExtractVector128 // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp3 = * HWINTRINSIC simd16 T Add // /--* tmp3 simd16 // node = * HWINTRINSIC simd16 T ToScalar // This is roughly the following managed code: // var tmp1 = Avx.DotProduct(op1, op2, 0xFF); // var tmp2 = Avx.ExtractVector128(tmp1, 0x01); // var tmp3 = Sse.Add(tmp1, tmp2); // return tmp3.ToScalar(); idx = comp->gtNewIconNode(0xF1, TYP_INT); BlockRange().InsertBefore(node, idx); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_AVX_DotProduct, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp1); LowerNode(tmp1); node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); idx = comp->gtNewIconNode(0x01, TYP_INT); BlockRange().InsertAfter(tmp2, idx); tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp2, idx, NI_AVX_ExtractVector128, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp2); LowerNode(tmp2); tmp3 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, NI_SSE_Add, simdBaseJitType, 16); BlockRange().InsertAfter(tmp2, tmp3); LowerNode(tmp3); node->SetSimdSize(16); node->ResetHWIntrinsicId(NI_Vector128_ToScalar, tmp3); LowerNode(node); return; } case TYP_DOUBLE: { assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX)); multiply = NI_AVX_Multiply; horizontalAdd = NI_AVX_HorizontalAdd; add = NI_AVX_Add; break; } default: { unreached(); } } } else { 
assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); switch (simdBaseType) { case TYP_SHORT: case TYP_USHORT: { multiply = NI_SSE2_MultiplyLow; horizontalAdd = NI_SSSE3_HorizontalAdd; add = NI_SSE2_Add; if (!comp->compOpportunisticallyDependsOn(InstructionSet_SSSE3)) { shuffle = NI_SSE2_ShuffleLow; } break; } case TYP_INT: case TYP_UINT: { multiply = NI_SSE41_MultiplyLow; horizontalAdd = NI_SSSE3_HorizontalAdd; add = NI_SSE2_Add; assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE41)); break; } case TYP_FLOAT: { if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { // We will be constructing the following parts: // idx = CNS_INT int 0xFF // /--* op1 simd16 // +--* op2 simd16 // +--* idx int // tmp3 = * HWINTRINSIC simd16 T DotProduct // /--* tmp3 simd16 // node = * HWINTRINSIC simd16 T ToScalar // This is roughly the following managed code: // var tmp3 = Avx.DotProduct(op1, op2, 0xFF); // return tmp3.ToScalar(); if (simdSize == 8) { idx = comp->gtNewIconNode(0x31, TYP_INT); } else if (simdSize == 12) { idx = comp->gtNewIconNode(0x71, TYP_INT); } else { assert(simdSize == 16); idx = comp->gtNewIconNode(0xF1, TYP_INT); } BlockRange().InsertBefore(node, idx); tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_SSE41_DotProduct, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp3); LowerNode(tmp3); node->ResetHWIntrinsicId(NI_Vector128_ToScalar, tmp3); LowerNode(node); return; } multiply = NI_SSE_Multiply; horizontalAdd = NI_SSE3_HorizontalAdd; add = NI_SSE_Add; if (!comp->compOpportunisticallyDependsOn(InstructionSet_SSE3)) { shuffle = NI_SSE_Shuffle; } break; } case TYP_DOUBLE: { if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { // We will be constructing the following parts: // idx = CNS_INT int 0x31 // /--* op1 simd16 // +--* op2 simd16 // +--* idx int // tmp3 = * HWINTRINSIC simd16 T DotProduct // /--* tmp3 simd16 // node = * HWINTRINSIC simd16 T ToScalar // This is roughly the following managed code: // var tmp3 = Avx.DotProduct(op1, op2, 0x31); // return tmp3.ToScalar(); idx = comp->gtNewIconNode(0x31, TYP_INT); BlockRange().InsertBefore(node, idx); tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_SSE41_DotProduct, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp3); LowerNode(tmp3); node->ResetHWIntrinsicId(NI_Vector128_ToScalar, tmp3); LowerNode(node); return; } multiply = NI_SSE2_Multiply; horizontalAdd = NI_SSE3_HorizontalAdd; add = NI_SSE2_Add; if (!comp->compOpportunisticallyDependsOn(InstructionSet_SSE3)) { shuffle = NI_SSE2_Shuffle; } break; } default: { unreached(); } } if (simdSize == 8) { assert(simdBaseType == TYP_FLOAT); // If simdSize == 8 then we have only two elements, not the 4 that we got from getSIMDVectorLength, // which we gave a simdSize of 16. So, we set the simd16Count to 2 so that only 1 hadd will // be emitted rather than 2, so that the upper two elements will be ignored. simd16Count = 2; } else if (simdSize == 12) { assert(simdBaseType == TYP_FLOAT); // We will be constructing the following parts: // ... // +--* CNS_INT int -1 // +--* CNS_INT int -1 // +--* CNS_INT int -1 // +--* CNS_INT int 0 // tmp1 = * HWINTRINSIC simd16 T Create // /--* op2 simd16 // +--* tmp1 simd16 // op1 = * HWINTRINSIC simd16 T And // ... // This is roughly the following managed code: // ... // tmp1 = Vector128.Create(-1, -1, -1, 0); // op1 = Sse.And(op1, tmp2); // ... 
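    // Explanatory note: the (-1, -1, -1, 0) mask built below clears the unused fourth lane
    // of the Vector3 operand before the multiply, so the stale upper element does not feed
    // into the dot-product sums that follow.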
GenTree* cns0 = comp->gtNewIconNode(-1, TYP_INT); BlockRange().InsertAfter(op1, cns0); GenTree* cns1 = comp->gtNewIconNode(-1, TYP_INT); BlockRange().InsertAfter(cns0, cns1); GenTree* cns2 = comp->gtNewIconNode(-1, TYP_INT); BlockRange().InsertAfter(cns1, cns2); GenTree* cns3 = comp->gtNewIconNode(0, TYP_INT); BlockRange().InsertAfter(cns2, cns3); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, cns0, cns1, cns2, cns3, NI_Vector128_Create, CORINFO_TYPE_INT, 16); BlockRange().InsertAfter(cns3, tmp1); LowerNode(tmp1); op1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, tmp1, NI_SSE_And, simdBaseJitType, simdSize); BlockRange().InsertAfter(tmp1, op1); LowerNode(op1); } } // We will be constructing the following parts: // /--* op1 simd16 // +--* op2 simd16 // tmp1 = * HWINTRINSIC simd16 T Multiply // ... // This is roughly the following managed code: // var tmp1 = Isa.Multiply(op1, op2); // ... tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, multiply, simdBaseJitType, simdSize); BlockRange().InsertBefore(node, tmp1); LowerNode(tmp1); // HorizontalAdd combines pairs so we need log2(simd16Count) passes to sum all elements together. int haddCount = genLog2(simd16Count); for (int i = 0; i < haddCount; i++) { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // ... // This is roughly the following managed code: // ... // tmp2 = tmp1; // ... node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); if (shuffle == NI_Illegal) { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp1 = * HWINTRINSIC simd16 T HorizontalAdd // ... // This is roughly the following managed code: // ... // tmp1 = Isa.HorizontalAdd(tmp1, tmp2); // ... tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, horizontalAdd, simdBaseJitType, simdSize); } else { int shuffleConst = 0x00; switch (i) { case 0: { assert((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT) || varTypeIsFloating(simdBaseType)); // Adds (e0 + e1, e1 + e0, e2 + e3, e3 + e2), giving: // e0, e1, e2, e3 | e4, e5, e6, e7 // e1, e0, e3, e2 | e5, e4, e7, e6 // ... shuffleConst = 0xB1; break; } case 1: { assert((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT) || (simdBaseType == TYP_FLOAT)); // Adds (e0 + e2, e1 + e3, e2 + e0, e3 + e1), giving: // ... // e2, e3, e0, e1 | e6, e7, e4, e5 // e3, e2, e1, e0 | e7, e6, e5, e4 shuffleConst = 0x4E; break; } case 2: { assert((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT)); // Adds (e0 + e4, e1 + e5, e2 + e6, e3 + e7), giving: // ... // e4, e5, e6, e7 | e0, e1, e2, e3 // e5, e4, e7, e6 | e1, e0, e3, e2 // e6, e7, e4, e5 | e2, e3, e0, e1 // e7, e6, e5, e4 | e3, e2, e1, e0 shuffleConst = 0x4E; break; } default: { unreached(); } } idx = comp->gtNewIconNode(shuffleConst, TYP_INT); BlockRange().InsertAfter(tmp2, idx); if (varTypeIsFloating(simdBaseType)) { // We will be constructing the following parts: // ... // /--* tmp2 simd16 // * STORE_LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // tmp3 = LCL_VAR simd16 // idx = CNS_INT int shuffleConst // /--* tmp2 simd16 // +--* tmp3 simd16 // +--* idx simd16 // tmp2 = * HWINTRINSIC simd16 T Shuffle // ... // This is roughly the following managed code: // ... // tmp3 = tmp2; // tmp2 = Isa.Shuffle(tmp2, tmp3, shuffleConst); // ... 
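    // Explanatory note: on this path SSE3 horizontal-add is unavailable, so each pass of
    // the surrounding loop emulates one horizontal-add step: shuffleConst pairs each
    // element with its neighbour, and the subsequent Add folds the pair into a partial sum.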
node->Op(1) = tmp2; LIR::Use tmp2Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp2Use); tmp2 = node->Op(1); tmp3 = comp->gtClone(tmp2); BlockRange().InsertAfter(tmp2, tmp3); tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, tmp3, idx, shuffle, simdBaseJitType, simdSize); } else { assert((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT)); if (i < 2) { // We will be constructing the following parts: // ... // idx = CNS_INT int shuffleConst // /--* tmp2 simd16 // +--* idx simd16 // tmp2 = * HWINTRINSIC simd16 T ShuffleLow // idx = CNS_INT int shuffleConst // /--* tmp2 simd16 // +--* idx simd16 // tmp2 = * HWINTRINSIC simd16 T ShuffleHigh // ... // This is roughly the following managed code: // ... // tmp2 = Isa.Shuffle(tmp1, shuffleConst); // ... tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_SSE2_ShuffleLow, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp2); LowerNode(tmp2); idx = comp->gtNewIconNode(shuffleConst, TYP_INT); BlockRange().InsertAfter(tmp2, idx); tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_SSE2_ShuffleHigh, simdBaseJitType, simdSize); } else { assert(i == 2); // We will be constructing the following parts: // ... // idx = CNS_INT int shuffleConst // /--* tmp2 simd16 // +--* idx simd16 // tmp2 = * HWINTRINSIC simd16 T ShuffleLow // ... // This is roughly the following managed code: // ... // tmp2 = Isa.Shuffle(tmp1, shuffleConst); // ... tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_SSE2_Shuffle, CORINFO_TYPE_INT, simdSize); } } BlockRange().InsertAfter(idx, tmp2); LowerNode(tmp2); // We will be constructing the following parts: // ... // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp1 = * HWINTRINSIC simd16 T Add // ... // This is roughly the following managed code: // ... // tmp1 = Isa.Add(tmp1, tmp2); // ... tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, add, simdBaseJitType, simdSize); } BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); } if (simdSize == 32) { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // idx = CNS_INT int 0x01 // /--* tmp2 simd16 // +--* idx int // tmp2 = * HWINTRINSIC simd16 T ExtractVector128 // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp1 = * HWINTRINSIC simd16 T Add // ... // This is roughly the following managed code: // ... // var tmp2 = tmp1; // tmp2 = Avx.ExtractVector128(tmp2, 0x01); // var tmp1 = Isa.Add(tmp1, tmp2); // ... node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); idx = comp->gtNewIconNode(0x01, TYP_INT); BlockRange().InsertAfter(tmp2, idx); tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp2, idx, NI_AVX_ExtractVector128, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp2); LowerNode(tmp2); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, add, simdBaseJitType, 16); BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); node->SetSimdSize(16); } // We will be constructing the following parts: // ... // /--* tmp1 simd16 // node = * HWINTRINSIC simd16 T ToScalar // This is roughly the following managed code: // ... 
// return tmp1.ToScalar(); node->ResetHWIntrinsicId(NI_Vector128_ToScalar, tmp1); LowerNode(node); } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsicToScalar: Lowers a Vector128 or Vector256 ToScalar call // // Arguments: // node - The hardware intrinsic node. // void Lowering::LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); var_types simdType = Compiler::getSIMDTypeForSize(simdSize); assert((intrinsicId == NI_Vector128_ToScalar) || (intrinsicId == NI_Vector256_ToScalar)); assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); switch (simdBaseType) { case TYP_BYTE: case TYP_SHORT: case TYP_INT: { node->gtType = TYP_INT; node->SetSimdBaseJitType(CORINFO_TYPE_INT); node->ChangeHWIntrinsicId(NI_SSE2_ConvertToInt32); break; } case TYP_UBYTE: case TYP_USHORT: case TYP_UINT: { node->gtType = TYP_UINT; node->SetSimdBaseJitType(CORINFO_TYPE_UINT); node->ChangeHWIntrinsicId(NI_SSE2_ConvertToUInt32); break; } #if defined(TARGET_AMD64) case TYP_LONG: { node->ChangeHWIntrinsicId(NI_SSE2_X64_ConvertToInt64); break; } case TYP_ULONG: { node->ChangeHWIntrinsicId(NI_SSE2_X64_ConvertToUInt64); break; } #endif // TARGET_AMD64 case TYP_FLOAT: case TYP_DOUBLE: { ContainCheckHWIntrinsic(node); return; } default: { unreached(); } } LowerNode(node); if (genTypeSize(simdBaseType) < 4) { LIR::Use use; bool foundUse = BlockRange().TryGetUse(node, &use); GenTreeCast* cast = comp->gtNewCastNode(simdBaseType, node, node->IsUnsigned(), simdBaseType); BlockRange().InsertAfter(node, cast); if (foundUse) { use.ReplaceWith(cast); } LowerNode(cast); } } //---------------------------------------------------------------------------------------------- // Lowering::TryLowerAndOpToResetLowestSetBit: Lowers a tree AND(X, ADD(X, -1)) to HWIntrinsic::ResetLowestSetBit // // Arguments: // andNode - GT_AND node of integral type // // Return Value: // Returns the replacement node if one is created else nullptr indicating no replacement // // Notes: // Performs containment checks on the replacement node if one is created GenTree* Lowering::TryLowerAndOpToResetLowestSetBit(GenTreeOp* andNode) { assert(andNode->OperIs(GT_AND) && varTypeIsIntegral(andNode)); GenTree* op1 = andNode->gtGetOp1(); if (!op1->OperIs(GT_LCL_VAR) || comp->lvaGetDesc(op1->AsLclVar())->IsAddressExposed()) { return nullptr; } GenTree* op2 = andNode->gtGetOp2(); if (!op2->OperIs(GT_ADD)) { return nullptr; } GenTree* addOp2 = op2->gtGetOp2(); if (!addOp2->IsIntegralConst(-1)) { return nullptr; } GenTree* addOp1 = op2->gtGetOp1(); if (!addOp1->OperIs(GT_LCL_VAR) || (addOp1->AsLclVar()->GetLclNum() != op1->AsLclVar()->GetLclNum())) { return nullptr; } NamedIntrinsic intrinsic; if (op1->TypeIs(TYP_LONG) && comp->compOpportunisticallyDependsOn(InstructionSet_BMI1_X64)) { intrinsic = NamedIntrinsic::NI_BMI1_X64_ResetLowestSetBit; } else if (comp->compOpportunisticallyDependsOn(InstructionSet_BMI1)) { intrinsic = NamedIntrinsic::NI_BMI1_ResetLowestSetBit; } else { return nullptr; } LIR::Use use; if (!BlockRange().TryGetUse(andNode, &use)) { return nullptr; } GenTreeHWIntrinsic* blsrNode = comp->gtNewScalarHWIntrinsicNode(andNode->TypeGet(), op1, intrinsic); JITDUMP("Lower: optimize AND(X, ADD(X, -1))\n"); DISPNODE(andNode); JITDUMP("to:\n"); 
DISPNODE(blsrNode); use.ReplaceWith(blsrNode); BlockRange().InsertBefore(andNode, blsrNode); BlockRange().Remove(andNode); BlockRange().Remove(op2); BlockRange().Remove(addOp1); BlockRange().Remove(addOp2); ContainCheckHWIntrinsic(blsrNode); return blsrNode; } //---------------------------------------------------------------------------------------------- // Lowering::TryLowerAndOpToAndNot: Lowers a tree AND(X, NOT(Y)) to HWIntrinsic::AndNot // // Arguments: // andNode - GT_AND node of integral type // // Return Value: // Returns the replacement node if one is created else nullptr indicating no replacement // // Notes: // Performs containment checks on the replacement node if one is created GenTree* Lowering::TryLowerAndOpToAndNot(GenTreeOp* andNode) { assert(andNode->OperIs(GT_AND) && varTypeIsIntegral(andNode)); GenTree* opNode = nullptr; GenTree* notNode = nullptr; if (andNode->gtGetOp1()->OperIs(GT_NOT)) { notNode = andNode->gtGetOp1(); opNode = andNode->gtGetOp2(); } else if (andNode->gtGetOp2()->OperIs(GT_NOT)) { notNode = andNode->gtGetOp2(); opNode = andNode->gtGetOp1(); } if (opNode == nullptr) { return nullptr; } // We want to avoid using "andn" when one of the operands is both a source and the destination and is also coming // from memory. In this scenario, we will get smaller and likely faster code by using the RMW encoding of `and` if (IsBinOpInRMWStoreInd(andNode)) { return nullptr; } NamedIntrinsic intrinsic; if (andNode->TypeIs(TYP_LONG) && comp->compOpportunisticallyDependsOn(InstructionSet_BMI1_X64)) { intrinsic = NamedIntrinsic::NI_BMI1_X64_AndNot; } else if (comp->compOpportunisticallyDependsOn(InstructionSet_BMI1)) { intrinsic = NamedIntrinsic::NI_BMI1_AndNot; } else { return nullptr; } LIR::Use use; if (!BlockRange().TryGetUse(andNode, &use)) { return nullptr; } // note that parameter order for andn is ~y, x so these are purposefully reversed when creating the node GenTreeHWIntrinsic* andnNode = comp->gtNewScalarHWIntrinsicNode(andNode->TypeGet(), notNode->AsUnOp()->gtGetOp1(), opNode, intrinsic); JITDUMP("Lower: optimize AND(X, NOT(Y)))\n"); DISPNODE(andNode); JITDUMP("to:\n"); DISPNODE(andnNode); use.ReplaceWith(andnNode); BlockRange().InsertBefore(andNode, andnNode); BlockRange().Remove(andNode); BlockRange().Remove(notNode); ContainCheckHWIntrinsic(andnNode); return andnNode; } #endif // FEATURE_HW_INTRINSICS //---------------------------------------------------------------------------------------------- // Lowering::IsRMWIndirCandidate: // Returns true if the given operand is a candidate indirection for a read-modify-write // operator. // // Arguments: // operand - The operand to consider. // storeInd - The indirect store that roots the possible RMW operator. // bool Lowering::IsRMWIndirCandidate(GenTree* operand, GenTree* storeInd) { // If the operand isn't an indirection, it's trivially not a candidate. if (operand->OperGet() != GT_IND) { return false; } // If the indirection's source address isn't equivalent to the destination address of the storeIndir, then the // indirection is not a candidate. GenTree* srcAddr = operand->gtGetOp1(); GenTree* dstAddr = storeInd->gtGetOp1(); if ((srcAddr->OperGet() != dstAddr->OperGet()) || !IndirsAreEquivalent(operand, storeInd)) { return false; } // If it is not safe to contain the entire tree rooted at the indirection, then the indirection is not a // candidate. 
Crawl the IR from the node immediately preceding the storeIndir until the last node in the // indirection's tree is visited and check the side effects at each point. m_scratchSideEffects.Clear(); assert((operand->gtLIRFlags & LIR::Flags::Mark) == 0); operand->gtLIRFlags |= LIR::Flags::Mark; unsigned markCount = 1; GenTree* node; for (node = storeInd->gtPrev; markCount > 0; node = node->gtPrev) { assert(node != nullptr); if ((node->gtLIRFlags & LIR::Flags::Mark) == 0) { m_scratchSideEffects.AddNode(comp, node); } else { node->gtLIRFlags &= ~LIR::Flags::Mark; markCount--; if (m_scratchSideEffects.InterferesWith(comp, node, false)) { // The indirection's tree contains some node that can't be moved to the storeInder. The indirection is // not a candidate. Clear any leftover mark bits and return. for (; markCount > 0; node = node->gtPrev) { if ((node->gtLIRFlags & LIR::Flags::Mark) != 0) { node->gtLIRFlags &= ~LIR::Flags::Mark; markCount--; } } return false; } node->VisitOperands([&markCount](GenTree* nodeOperand) -> GenTree::VisitResult { assert((nodeOperand->gtLIRFlags & LIR::Flags::Mark) == 0); nodeOperand->gtLIRFlags |= LIR::Flags::Mark; markCount++; return GenTree::VisitResult::Continue; }); } } // At this point we've verified that the operand is an indirection, its address is equivalent to the storeIndir's // destination address, and that it and the transitive closure of its operand can be safely contained by the // storeIndir. This indirection is therefore a candidate for an RMW op. return true; } //---------------------------------------------------------------------------------------------- // Returns true if this tree is bin-op of a GT_STOREIND of the following form // storeInd(subTreeA, binOp(gtInd(subTreeA), subtreeB)) or // storeInd(subTreeA, binOp(subtreeB, gtInd(subTreeA)) in case of commutative bin-ops // // The above form for storeInd represents a read-modify-write memory binary operation. // // Parameters // tree - GentreePtr of binOp // // Return Value // True if 'tree' is part of a RMW memory operation pattern // bool Lowering::IsBinOpInRMWStoreInd(GenTree* tree) { // Must be a non floating-point type binary operator since SSE2 doesn't support RMW memory ops assert(!varTypeIsFloating(tree)); assert(GenTree::OperIsBinary(tree->OperGet())); // Cheap bail out check before more expensive checks are performed. // RMW memory op pattern requires that one of the operands of binOp to be GT_IND. if (tree->gtGetOp1()->OperGet() != GT_IND && tree->gtGetOp2()->OperGet() != GT_IND) { return false; } LIR::Use use; if (!BlockRange().TryGetUse(tree, &use) || use.User()->OperGet() != GT_STOREIND || use.User()->gtGetOp2() != tree) { return false; } // Since it is not relatively cheap to recognize RMW memory op pattern, we // cache the result in GT_STOREIND node so that while lowering GT_STOREIND // we can use the result. GenTree* indirCandidate = nullptr; GenTree* indirOpSource = nullptr; return IsRMWMemOpRootedAtStoreInd(use.User(), &indirCandidate, &indirOpSource); } //---------------------------------------------------------------------------------------------- // This method recognizes the case where we have a treeNode with the following structure: // storeInd(IndirDst, binOp(gtInd(IndirDst), indirOpSource)) OR // storeInd(IndirDst, binOp(indirOpSource, gtInd(IndirDst)) in case of commutative operations OR // storeInd(IndirDst, unaryOp(gtInd(IndirDst)) in case of unary operations // // Terminology: // indirDst = memory write of an addr mode (i.e. 
storeind destination)
// indirSrc = value being written to memory (i.e. storeind source which could either be a binary or unary op)
// indirCandidate = memory read i.e. a gtInd of an addr mode
// indirOpSource = source operand used in binary/unary op (i.e. source operand of indirSrc node)
//
// In x86/x64 this storeInd pattern can be effectively encoded in a single instruction of the
// following form in case of integer operations:
// binOp [addressing mode], RegIndirOpSource
// binOp [addressing mode], immediateVal
// where RegIndirOpSource is the register where indirOpSource was computed.
//
// Right now, we recognize a few cases:
// a) The gtInd child is a lea/lclVar/lclVarAddr/clsVarAddr/constant
// b) BinOp is either add, sub, xor, or, and, shl, rsh, rsz.
// c) unaryOp is either not/neg
//
// Implementation Note: The following routines need to be in sync for RMW memory op optimization
// to be correct and functional.
// IndirsAreEquivalent()
// NodesAreEquivalentLeaves()
// Codegen of GT_STOREIND and genCodeForShiftRMW()
// emitInsRMW()
//
// TODO-CQ: Enable support for more complex indirections (if needed) or use the value numbering
// package to perform more complex tree recognition.
//
// TODO-XArch-CQ: Add support for RMW of lcl fields (e.g. lclfield binop= source)
//
// Parameters:
// tree - GT_STOREIND node
// outIndirCandidate - out param set to indirCandidate as described above
// outIndirOpSource - out param set to indirOpSource as described above
//
// Return value
// True if there is a RMW memory operation rooted at a GT_STOREIND tree
// and out params indirCandidate and indirOpSource are set to non-null values.
// Otherwise, returns false with indirCandidate and indirOpSource set to null.
// Also updates flags of GT_STOREIND tree with its RMW status.
//
bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTree* tree, GenTree** outIndirCandidate, GenTree** outIndirOpSource)
{
    assert(!varTypeIsFloating(tree));
    assert(outIndirCandidate != nullptr);
    assert(outIndirOpSource != nullptr);

    *outIndirCandidate = nullptr;
    *outIndirOpSource  = nullptr;

    // Early out if storeInd is already known to be a non-RMW memory op
    GenTreeStoreInd* storeInd = tree->AsStoreInd();
    if (storeInd->IsNonRMWMemoryOp())
    {
        return false;
    }

    GenTree*   indirDst = storeInd->gtGetOp1();
    GenTree*   indirSrc = storeInd->gtGetOp2();
    genTreeOps oper     = indirSrc->OperGet();

    // Early out if it is already known to be a RMW memory op
    if (storeInd->IsRMWMemoryOp())
    {
        if (GenTree::OperIsBinary(oper))
        {
            if (storeInd->IsRMWDstOp1())
            {
                *outIndirCandidate = indirSrc->gtGetOp1();
                *outIndirOpSource  = indirSrc->gtGetOp2();
            }
            else
            {
                assert(storeInd->IsRMWDstOp2());
                *outIndirCandidate = indirSrc->gtGetOp2();
                *outIndirOpSource  = indirSrc->gtGetOp1();
            }
            assert(IndirsAreEquivalent(*outIndirCandidate, storeInd));
        }
        else
        {
            assert(GenTree::OperIsUnary(oper));
            assert(IndirsAreEquivalent(indirSrc->gtGetOp1(), storeInd));
            *outIndirCandidate = indirSrc->gtGetOp1();
            *outIndirOpSource  = indirSrc->gtGetOp1();
        }
        return true;
    }

    // If we reach here, we do not yet know the RMW status of the tree rooted at storeInd
    assert(storeInd->IsRMWStatusUnknown());

    // Early out if indirDst is not one of the supported memory operands.
    if (!indirDst->OperIs(GT_LEA, GT_LCL_VAR, GT_LCL_VAR_ADDR, GT_CLS_VAR_ADDR, GT_CNS_INT))
    {
        storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_ADDR);
        return false;
    }

    // We cannot use Read-Modify-Write instruction forms with overflow checking instructions
    // because we are not allowed to modify the target until after the overflow check.
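    // For example, a checked add whose destination is this memory location would have to
    // raise the overflow exception before the store happens; the single-instruction RMW
    // form computes and writes in one step, so such trees are rejected just below.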
if (indirSrc->gtOverflowEx()) { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_OPER); return false; } // At this point we can match one of two patterns: // // t_ind = indir t_addr_0 // ... // t_value = binop t_ind, t_other // ... // storeIndir t_addr_1, t_value // // or // // t_ind = indir t_addr_0 // ... // t_value = unop t_ind // ... // storeIndir t_addr_1, t_value // // In all cases, we will eventually make the binop that produces t_value and the entire dataflow tree rooted at // t_ind contained by t_value. GenTree* indirCandidate = nullptr; GenTree* indirOpSource = nullptr; RMWStatus status = STOREIND_RMW_STATUS_UNKNOWN; if (GenTree::OperIsBinary(oper)) { // Return if binary op is not one of the supported operations for RMW of memory. if (!GenTree::OperIsRMWMemOp(oper)) { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_OPER); return false; } if (GenTree::OperIsShiftOrRotate(oper) && varTypeIsSmall(storeInd)) { // In ldind, Integer values smaller than 4 bytes, a boolean, or a character converted to 4 bytes // by sign or zero-extension as appropriate. If we directly shift the short type data using sar, we // will lose the sign or zero-extension bits. storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_TYPE); return false; } // In the common case, the second operand to the binop will be the indir candidate. GenTreeOp* binOp = indirSrc->AsOp(); if (GenTree::OperIsCommutative(oper) && IsRMWIndirCandidate(binOp->gtOp2, storeInd)) { indirCandidate = binOp->gtOp2; indirOpSource = binOp->gtOp1; status = STOREIND_RMW_DST_IS_OP2; } else if (IsRMWIndirCandidate(binOp->gtOp1, storeInd)) { indirCandidate = binOp->gtOp1; indirOpSource = binOp->gtOp2; status = STOREIND_RMW_DST_IS_OP1; } else { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_ADDR); return false; } } else if (GenTree::OperIsUnary(oper)) { // Nodes other than GT_NOT and GT_NEG are not yet supported. if (oper != GT_NOT && oper != GT_NEG) { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_OPER); return false; } if (indirSrc->gtGetOp1()->OperGet() != GT_IND) { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_ADDR); return false; } GenTreeUnOp* unOp = indirSrc->AsUnOp(); if (IsRMWIndirCandidate(unOp->gtOp1, storeInd)) { // src and dest are the same in case of unary ops indirCandidate = unOp->gtOp1; indirOpSource = unOp->gtOp1; status = STOREIND_RMW_DST_IS_OP1; } else { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_ADDR); return false; } } else { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_OPER); return false; } // By this point we've verified that we have a supported operand with a supported address. Now we need to ensure // that we're able to move the destination address for the source indirection forwards. if (!IsSafeToContainMem(storeInd, indirDst)) { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_ADDR); return false; } assert(indirCandidate != nullptr); assert(indirOpSource != nullptr); assert(status != STOREIND_RMW_STATUS_UNKNOWN); *outIndirCandidate = indirCandidate; *outIndirOpSource = indirOpSource; storeInd->SetRMWStatus(status); return true; } // anything is in range for AMD64 bool Lowering::IsCallTargetInRange(void* addr) { return true; } // return true if the immediate can be folded into an instruction, for example small enough and non-relocatable bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) const { if (!childNode->IsIntCnsFitsInI32()) { return false; } // At this point we know that it is an int const fits within 4-bytes and hence can safely cast to IntConCommon. 
    // Icons that need relocation should never be marked as contained immed
    if (childNode->AsIntConCommon()->ImmedValNeedsReloc(comp))
    {
        return false;
    }

    return true;
}

//-----------------------------------------------------------------------
// PreferredRegOptionalOperand: returns one of the operands of given
// binary oper that is to be preferred for marking as reg optional.
//
// Since only one of op1 or op2 can be a memory operand on xarch, only
// one of them has to be marked as reg optional. Since Lower doesn't
// know a priori which of op1 or op2 is not likely to get a register, it
// has to make a guess. This routine encapsulates heuristics that
// guess whether it is likely to be beneficial to mark op1 or op2 as
// reg optional.
//
//
// Arguments:
// tree - a binary-op tree node that is either commutative
// or a compare oper.
//
// Returns:
// Returns op1 or op2 of tree node that is preferred for
// marking as reg optional.
//
// Note: if the tree oper is neither commutative nor a compare oper
// then only op2 can be reg optional on xarch and hence no need to
// call this routine.
GenTree* Lowering::PreferredRegOptionalOperand(GenTree* tree)
{
    assert(GenTree::OperIsBinary(tree->OperGet()));
    assert(tree->OperIsCommutative() || tree->OperIsCompare() || tree->OperIs(GT_CMP));

    GenTree* op1 = tree->gtGetOp1();
    GenTree* op2 = tree->gtGetOp2();
    assert(!op1->IsRegOptional() && !op2->IsRegOptional());

    // We default to op1, as op2 is likely to have the shorter lifetime.
    GenTree* preferredOp = op1;

    // This routine uses the following heuristics:
    //
    // a) If both are register candidates, marking the one with lower weighted
    // ref count as reg-optional would likely be beneficial as it has
    // higher probability of not getting a register. Note that we use !lvDoNotEnregister
    // here because this is being done while we are adding lclVars for Lowering.
    //
    // b) op1 = tracked local and op2 = untracked local: LSRA creates two
    // ref positions for op2: a def and use position. op2's def position
    // requires a reg and it is allocated a reg by spilling another
    // interval (if required) and that could be even op1. For this reason
    // it is beneficial to mark op1 as reg optional.
    //
    // TODO: It is not always mandatory for a def position of an untracked
    // local to be allocated a register if it is on rhs of an assignment
    // and its use position is reg-optional and has not been assigned a
    // register. Reg optional def positions are not yet supported.
    //
    // c) op1 = untracked local and op2 = tracked local: marking op1 as
    // reg optional is beneficial, since its use position is less likely
    // to get a register.
    //
    // d) If both are untracked locals (i.e. treated like tree temps by
    // LSRA): though either of them could be marked as reg optional,
    // marking op1 as reg optional is likely to be beneficial because
    // while allocating op2's def position, there is a possibility of
    // spilling op1's def, in which case op1 is treated as a contained
    // memory operand rather than requiring a reload.
    //
    // e) If only one of them is a local var, prefer to mark it as
    // reg-optional. This heuristic is based on the results
    // obtained against CQ perf benchmarks.
    //
    // f) If neither of them are local vars (i.e. tree temps), prefer to
    // mark op1 as reg optional for the same reason as mentioned in (d) above.
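    // In short (a paraphrase of the code below): when both operands are enregisterable,
    // tracked locals, the one with the lower weighted ref count is preferred (op2 whenever
    // op1's weight is greater than or equal to op2's); when only op2 is a local var, op2 is
    // preferred; in every other case the default of op1 stands.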
if (op1->OperGet() == GT_LCL_VAR && op2->OperGet() == GT_LCL_VAR) { LclVarDsc* v1 = comp->lvaGetDesc(op1->AsLclVarCommon()); LclVarDsc* v2 = comp->lvaGetDesc(op2->AsLclVarCommon()); bool v1IsRegCandidate = !v1->lvDoNotEnregister; bool v2IsRegCandidate = !v2->lvDoNotEnregister; if (v1IsRegCandidate && v2IsRegCandidate) { // Both are enregisterable locals. The one with lower weight is less likely // to get a register and hence beneficial to mark the one with lower // weight as reg optional. // If either is not tracked, it may be that it was introduced after liveness // was run, in which case we will always prefer op1 (should we use raw refcnt??). if (v1->lvTracked && v2->lvTracked && (v1->lvRefCntWtd() >= v2->lvRefCntWtd())) { preferredOp = op2; } } } else if (!(op1->OperGet() == GT_LCL_VAR) && (op2->OperGet() == GT_LCL_VAR)) { preferredOp = op2; } return preferredOp; } //------------------------------------------------------------------------ // Containment analysis //------------------------------------------------------------------------ //------------------------------------------------------------------------ // ContainCheckCallOperands: Determine whether operands of a call should be contained. // // Arguments: // call - The call node of interest // // Return Value: // None. // void Lowering::ContainCheckCallOperands(GenTreeCall* call) { GenTree* ctrlExpr = call->gtControlExpr; if (call->gtCallType == CT_INDIRECT) { // either gtControlExpr != null or gtCallAddr != null. // Both cannot be non-null at the same time. assert(ctrlExpr == nullptr); assert(call->gtCallAddr != nullptr); ctrlExpr = call->gtCallAddr; #ifdef TARGET_X86 // Fast tail calls aren't currently supported on x86, but if they ever are, the code // below that handles indirect VSD calls will need to be fixed. assert(!call->IsFastTailCall() || !call->IsVirtualStub()); #endif // TARGET_X86 } // set reg requirements on call target represented as control sequence. if (ctrlExpr != nullptr) { // we should never see a gtControlExpr whose type is void. assert(ctrlExpr->TypeGet() != TYP_VOID); #ifdef TARGET_X86 // On x86, we need to generate a very specific pattern for indirect VSD calls: // // 3-byte nop // call dword ptr [eax] // // Where EAX is also used as an argument to the stub dispatch helper. Make // sure that the call target address is computed into EAX in this case. if (call->IsVirtualStub() && (call->gtCallType == CT_INDIRECT)) { assert(ctrlExpr->isIndir()); MakeSrcContained(call, ctrlExpr); } else #endif // TARGET_X86 if (ctrlExpr->isIndir()) { // We may have cases where we have set a register target on the ctrlExpr, but if it // contained we must clear it. ctrlExpr->SetRegNum(REG_NA); MakeSrcContained(call, ctrlExpr); } } for (GenTreeCall::Use& use : call->Args()) { if (use.GetNode()->OperIs(GT_PUTARG_STK)) { LowerPutArgStk(use.GetNode()->AsPutArgStk()); } } for (GenTreeCall::Use& use : call->LateArgs()) { if (use.GetNode()->OperIs(GT_PUTARG_STK)) { LowerPutArgStk(use.GetNode()->AsPutArgStk()); } } } //------------------------------------------------------------------------ // ContainCheckIndir: Determine whether operands of an indir should be contained. // // Arguments: // node - The indirection node of interest // // Notes: // This is called for both store and load indirections. In the former case, it is assumed that // LowerStoreIndir() has already been called to check for RMW opportunities. // // Return Value: // None. 
// void Lowering::ContainCheckIndir(GenTreeIndir* node) { GenTree* addr = node->Addr(); // If this is the rhs of a block copy it will be handled when we handle the store. if (node->TypeGet() == TYP_STRUCT) { return; } #ifdef FEATURE_SIMD // If indirTree is of TYP_SIMD12, don't mark addr as contained // so that it always get computed to a register. This would // mean codegen side logic doesn't need to handle all possible // addr expressions that could be contained. // // TODO-XArch-CQ: handle other addr mode expressions that could be marked // as contained. if (node->TypeGet() == TYP_SIMD12) { return; } #endif // FEATURE_SIMD if ((node->gtFlags & GTF_IND_REQ_ADDR_IN_REG) != 0) { // The address of an indirection that requires its address in a reg. // Skip any further processing that might otherwise make it contained. } else if (addr->OperIs(GT_CLS_VAR_ADDR, GT_LCL_VAR_ADDR, GT_LCL_FLD_ADDR)) { // These nodes go into an addr mode: // - GT_CLS_VAR_ADDR turns into a constant. // - GT_LCL_VAR_ADDR, GT_LCL_FLD_ADDR is a stack addr mode. // make this contained, it turns into a constant that goes into an addr mode MakeSrcContained(node, addr); } else if (addr->IsCnsIntOrI() && addr->AsIntConCommon()->FitsInAddrBase(comp)) { // Amd64: // We can mark any pc-relative 32-bit addr as containable. // // On x86, direct VSD is done via a relative branch, and in fact it MUST be contained. MakeSrcContained(node, addr); } else if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(node, addr)) { MakeSrcContained(node, addr); } } //------------------------------------------------------------------------ // ContainCheckStoreIndir: determine whether the sources of a STOREIND node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckStoreIndir(GenTreeStoreInd* node) { // If the source is a containable immediate, make it contained, unless it is // an int-size or larger store of zero to memory, because we can generate smaller code // by zeroing a register and then storing it. GenTree* src = node->Data(); if (IsContainableImmed(node, src) && (!src->IsIntegralConst(0) || varTypeIsSmall(node))) { MakeSrcContained(node, src); } ContainCheckIndir(node); } //------------------------------------------------------------------------ // ContainCheckMul: determine whether the sources of a MUL node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckMul(GenTreeOp* node) { #if defined(TARGET_X86) assert(node->OperIs(GT_MUL, GT_MULHI, GT_MUL_LONG)); #else assert(node->OperIs(GT_MUL, GT_MULHI)); #endif // Case of float/double mul. 
if (varTypeIsFloating(node->TypeGet())) { ContainCheckFloatBinary(node); return; } GenTree* op1 = node->AsOp()->gtOp1; GenTree* op2 = node->AsOp()->gtOp2; bool isSafeToContainOp1 = true; bool isSafeToContainOp2 = true; bool isUnsignedMultiply = ((node->gtFlags & GTF_UNSIGNED) != 0); bool requiresOverflowCheck = node->gtOverflowEx(); bool useLeaEncoding = false; GenTree* memOp = nullptr; bool hasImpliedFirstOperand = false; GenTreeIntConCommon* imm = nullptr; GenTree* other = nullptr; // Multiply should never be using small types assert(!varTypeIsSmall(node->TypeGet())); // We do use the widening multiply to implement // the overflow checking for unsigned multiply // if (isUnsignedMultiply && requiresOverflowCheck) { hasImpliedFirstOperand = true; } else if (node->OperGet() == GT_MULHI) { hasImpliedFirstOperand = true; } #if defined(TARGET_X86) else if (node->OperGet() == GT_MUL_LONG) { hasImpliedFirstOperand = true; } #endif else if (IsContainableImmed(node, op2) || IsContainableImmed(node, op1)) { if (IsContainableImmed(node, op2)) { imm = op2->AsIntConCommon(); other = op1; } else { imm = op1->AsIntConCommon(); other = op2; } // CQ: We want to rewrite this into a LEA ssize_t immVal = imm->AsIntConCommon()->IconValue(); if (!requiresOverflowCheck && (immVal == 3 || immVal == 5 || immVal == 9)) { useLeaEncoding = true; } MakeSrcContained(node, imm); // The imm is always contained if (IsContainableMemoryOp(other)) { memOp = other; // memOp may be contained below } } // We allow one operand to be a contained memory operand. // The memory op type must match with the 'node' type. // This is because during codegen we use 'node' type to derive EmitTypeSize. // E.g op1 type = byte, op2 type = byte but GT_MUL node type is int. // if (memOp == nullptr) { if ((op2->TypeGet() == node->TypeGet()) && IsContainableMemoryOp(op2)) { isSafeToContainOp2 = IsSafeToContainMem(node, op2); if (isSafeToContainOp2) { memOp = op2; } } if ((memOp == nullptr) && (op1->TypeGet() == node->TypeGet()) && IsContainableMemoryOp(op1)) { isSafeToContainOp1 = IsSafeToContainMem(node, op1); if (isSafeToContainOp1) { memOp = op1; } } } else { if ((memOp->TypeGet() != node->TypeGet())) { memOp = nullptr; } else if (!IsSafeToContainMem(node, memOp)) { if (memOp == op1) { isSafeToContainOp1 = false; } else { isSafeToContainOp2 = false; } memOp = nullptr; } } // To generate an LEA we need to force memOp into a register // so don't allow memOp to be 'contained' // if (!useLeaEncoding) { if (memOp != nullptr) { MakeSrcContained(node, memOp); } else { // IsSafeToContainMem is expensive so we call it at most once for each operand // in this method. If we already called IsSafeToContainMem, it must have returned false; // otherwise, memOp would be set to the corresponding operand (op1 or op2). if (imm != nullptr) { // Has a contained immediate operand. // Only 'other' operand can be marked as reg optional. assert(other != nullptr); isSafeToContainOp1 = ((other == op1) && isSafeToContainOp1 && IsSafeToContainMem(node, op1)); isSafeToContainOp2 = ((other == op2) && isSafeToContainOp2 && IsSafeToContainMem(node, op2)); } else if (hasImpliedFirstOperand) { // Only op2 can be marked as reg optional. isSafeToContainOp1 = false; isSafeToContainOp2 = isSafeToContainOp2 && IsSafeToContainMem(node, op2); } else { // If there are no containable operands, we can make either of op1 or op2 // as reg optional. 
isSafeToContainOp1 = isSafeToContainOp1 && IsSafeToContainMem(node, op1); isSafeToContainOp2 = isSafeToContainOp2 && IsSafeToContainMem(node, op2); } SetRegOptionalForBinOp(node, isSafeToContainOp1, isSafeToContainOp2); } } } //------------------------------------------------------------------------ // ContainCheckDivOrMod: determine which operands of a div/mod should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckDivOrMod(GenTreeOp* node) { assert(node->OperIs(GT_DIV, GT_MOD, GT_UDIV, GT_UMOD)); if (varTypeIsFloating(node->TypeGet())) { ContainCheckFloatBinary(node); return; } GenTree* divisor = node->gtGetOp2(); bool divisorCanBeRegOptional = true; #ifdef TARGET_X86 GenTree* dividend = node->gtGetOp1(); if (dividend->OperGet() == GT_LONG) { divisorCanBeRegOptional = false; MakeSrcContained(node, dividend); } #endif // divisor can be an r/m, but the memory indirection must be of the same size as the divide if (IsContainableMemoryOp(divisor) && (divisor->TypeGet() == node->TypeGet()) && IsSafeToContainMem(node, divisor)) { MakeSrcContained(node, divisor); } else if (divisorCanBeRegOptional) { // If there are no containable operands, we can make an operand reg optional. // Div instruction allows only divisor to be a memory op. divisor->SetRegOptional(); } } //------------------------------------------------------------------------ // ContainCheckShiftRotate: determine whether the sources of a shift/rotate node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckShiftRotate(GenTreeOp* node) { assert(node->OperIsShiftOrRotate()); #ifdef TARGET_X86 GenTree* source = node->gtOp1; if (node->OperIsShiftLong()) { assert(source->OperGet() == GT_LONG); MakeSrcContained(node, source); } #endif // !TARGET_X86 GenTree* shiftBy = node->gtOp2; if (IsContainableImmed(node, shiftBy) && (shiftBy->AsIntConCommon()->IconValue() <= 255) && (shiftBy->AsIntConCommon()->IconValue() >= 0)) { MakeSrcContained(node, shiftBy); } } //------------------------------------------------------------------------ // ContainCheckStoreLoc: determine whether the source of a STORE_LCL* should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const { assert(storeLoc->OperIsLocalStore()); GenTree* op1 = storeLoc->gtGetOp1(); if (op1->OperIs(GT_BITCAST)) { // If we know that the source of the bitcast will be in a register, then we can make // the bitcast itself contained. This will allow us to store directly from the other // type if this node doesn't get a register. GenTree* bitCastSrc = op1->gtGetOp1(); if (!bitCastSrc->isContained() && !bitCastSrc->IsRegOptional()) { op1->SetContained(); return; } } const LclVarDsc* varDsc = comp->lvaGetDesc(storeLoc); #ifdef FEATURE_SIMD if (varTypeIsSIMD(storeLoc)) { assert(!op1->IsCnsIntOrI()); if (storeLoc->TypeIs(TYP_SIMD12) && op1->IsSIMDZero() && varDsc->lvDoNotEnregister) { // For a SIMD12 store we can zero from integer registers more easily. MakeSrcContained(storeLoc, op1); GenTree* constNode = op1->gtGetOp1(); assert(constNode->OperIsConst()); constNode->ClearContained(); constNode->gtType = TYP_INT; constNode->SetOper(GT_CNS_INT); } return; } #endif // FEATURE_SIMD // If the source is a containable immediate, make it contained, unless it is // an int-size or larger store of zero to memory, because we can generate smaller code // by zeroing a register and then storing it. 
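    // (E.g. "xor reg, reg" followed by a plain register store avoids encoding the 4-byte
    // zero immediate that a store of a constant 0 would otherwise require.)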
var_types type = varDsc->GetRegisterType(storeLoc); if (IsContainableImmed(storeLoc, op1) && (!op1->IsIntegralConst(0) || varTypeIsSmall(type))) { MakeSrcContained(storeLoc, op1); } #ifdef TARGET_X86 else if (op1->OperGet() == GT_LONG) { MakeSrcContained(storeLoc, op1); } #endif // TARGET_X86 } //------------------------------------------------------------------------ // ContainCheckCast: determine whether the source of a CAST node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckCast(GenTreeCast* node) { GenTree* castOp = node->CastOp(); var_types castToType = node->CastToType(); var_types srcType = castOp->TypeGet(); // force the srcType to unsigned if GT_UNSIGNED flag is set if (node->gtFlags & GTF_UNSIGNED) { srcType = varTypeToUnsigned(srcType); } if (!node->gtOverflow() && (varTypeIsFloating(castToType) || varTypeIsFloating(srcType))) { #ifdef DEBUG // If converting to float/double, the operand must be 4 or 8 byte in size. if (varTypeIsFloating(castToType)) { unsigned opSize = genTypeSize(srcType); assert(opSize == 4 || opSize == 8); } #endif // DEBUG // U8 -> R8 conversion requires that the operand be in a register. if (srcType != TYP_ULONG) { if ((IsContainableMemoryOp(castOp) && IsSafeToContainMem(node, castOp)) || castOp->IsCnsNonZeroFltOrDbl()) { MakeSrcContained(node, castOp); } else { // Mark castOp as reg optional to indicate codegen // can still generate code if it is on stack. castOp->SetRegOptional(); } } } #if !defined(TARGET_64BIT) if (varTypeIsLong(srcType)) { noway_assert(castOp->OperGet() == GT_LONG); castOp->SetContained(); } #endif // !defined(TARGET_64BIT) } //------------------------------------------------------------------------ // ContainCheckCompare: determine whether the sources of a compare node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckCompare(GenTreeOp* cmp) { assert(cmp->OperIsCompare() || cmp->OperIs(GT_CMP)); GenTree* op1 = cmp->AsOp()->gtOp1; GenTree* op2 = cmp->AsOp()->gtOp2; var_types op1Type = op1->TypeGet(); var_types op2Type = op2->TypeGet(); // If either of op1 or op2 is floating point values, then we need to use // ucomiss or ucomisd to compare, both of which support the following form: // ucomis[s|d] xmm, xmm/mem // That is only the second operand can be a memory op. // // Second operand is a memory Op: Note that depending on comparison operator, // the operands of ucomis[s|d] need to be reversed. Therefore, either op1 or // op2 can be a memory op depending on the comparison operator. if (varTypeIsFloating(op1Type)) { // The type of the operands has to be the same and no implicit conversions at this stage. assert(op1Type == op2Type); GenTree* otherOp; if (GenCondition::FromFloatRelop(cmp).PreferSwap()) { otherOp = op1; } else { otherOp = op2; } assert(otherOp != nullptr); bool isSafeToContainOtherOp = true; if (otherOp->IsCnsNonZeroFltOrDbl()) { MakeSrcContained(cmp, otherOp); } else if (IsContainableMemoryOp(otherOp)) { isSafeToContainOtherOp = IsSafeToContainMem(cmp, otherOp); if (isSafeToContainOtherOp) { MakeSrcContained(cmp, otherOp); } } if (!otherOp->isContained() && isSafeToContainOtherOp && IsSafeToContainMem(cmp, otherOp)) { // SSE2 allows only otherOp to be a memory-op. Since otherOp is not // contained, we can mark it reg-optional. // IsSafeToContainMem is expensive so we call it at most once for otherOp. // If we already called IsSafeToContainMem, it must have returned false; // otherwise, otherOp would be contained. 
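// (If otherOp does end up on the stack, codegen can still fold the load, e.g.
// "ucomiss xmm0, dword ptr [mem]" or "ucomisd xmm0, qword ptr [mem]"; the
// registers shown are illustrative.)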
otherOp->SetRegOptional(); } return; } // TODO-XArch-CQ: factor out cmp optimization in 'genCondSetFlags' to be used here // or in other backend. if (CheckImmedAndMakeContained(cmp, op2)) { // If the types are the same, or if the constant is of the correct size, // we can treat the MemoryOp as contained. if (op1Type == op2Type) { if (IsContainableMemoryOp(op1) && IsSafeToContainMem(cmp, op1)) { MakeSrcContained(cmp, op1); } else { op1->SetRegOptional(); } } } else if (op1Type == op2Type) { // Note that TEST does not have a r,rm encoding like CMP has but we can still // contain the second operand because the emitter maps both r,rm and rm,r to // the same instruction code. This avoids the need to special case TEST here. bool isSafeToContainOp1 = true; bool isSafeToContainOp2 = true; if (IsContainableMemoryOp(op2)) { isSafeToContainOp2 = IsSafeToContainMem(cmp, op2); if (isSafeToContainOp2) { MakeSrcContained(cmp, op2); } } if (!op2->isContained() && IsContainableMemoryOp(op1)) { isSafeToContainOp1 = IsSafeToContainMem(cmp, op1); if (isSafeToContainOp1) { MakeSrcContained(cmp, op1); } } if (!op1->isContained() && !op2->isContained()) { // One of op1 or op2 could be marked as reg optional // to indicate that codegen can still generate code // if one of them is on stack. GenTree* regOptionalCandidate = op1->IsCnsIntOrI() ? op2 : PreferredRegOptionalOperand(cmp); // IsSafeToContainMem is expensive so we call it at most once for each operand // in this method. If we already called IsSafeToContainMem, it must have returned false; // otherwise, the corresponding operand (op1 or op2) would be contained. bool setRegOptional = (regOptionalCandidate == op1) ? isSafeToContainOp1 && IsSafeToContainMem(cmp, op1) : isSafeToContainOp2 && IsSafeToContainMem(cmp, op2); if (setRegOptional) { regOptionalCandidate->SetRegOptional(); } } } } //------------------------------------------------------------------------ // LowerRMWMemOp: Determine if this is a valid RMW mem op, and if so lower it accordingly // // Arguments: // node - The indirect store node (GT_STORE_IND) of interest // // Return Value: // Returns true if 'node' is a valid RMW mem op; false otherwise. // bool Lowering::LowerRMWMemOp(GenTreeIndir* storeInd) { assert(storeInd->OperGet() == GT_STOREIND); // SSE2 doesn't support RMW on float values assert(!varTypeIsFloating(storeInd)); // Terminology: // indirDst = memory write of an addr mode (i.e. storeind destination) // indirSrc = value being written to memory (i.e. storeind source which could a binary/unary op) // indirCandidate = memory read i.e. a gtInd of an addr mode // indirOpSource = source operand used in binary/unary op (i.e. 
// source operand of indirSrc node)
GenTree* indirCandidate = nullptr; GenTree* indirOpSource = nullptr; if (!IsRMWMemOpRootedAtStoreInd(storeInd, &indirCandidate, &indirOpSource)) { JITDUMP("Lower of StoreInd didn't mark the node as self contained for reason: %s\n", RMWStatusDescription(storeInd->AsStoreInd()->GetRMWStatus())); DISPTREERANGE(BlockRange(), storeInd); return false; } GenTree* indirDst = storeInd->gtGetOp1(); GenTree* indirSrc = storeInd->gtGetOp2(); genTreeOps oper = indirSrc->OperGet();
// At this point we have successfully detected a RMW memory op of one of the following forms
// storeInd(indirDst, indirSrc(indirCandidate, indirOpSource)) OR
// storeInd(indirDst, indirSrc(indirOpSource, indirCandidate)) in case of commutative operations OR
// storeInd(indirDst, indirSrc(indirCandidate)) in case of unary operations
//
// Here indirSrc = one of the supported binary or unary operations for RMW of memory
// indirCandidate = a GT_IND node
// indirCandidateChild = operand of GT_IND indirCandidate
//
// The logic below does the following
// Make indirOpSource contained.
// Make indirSrc contained.
// Make indirCandidate contained.
// Make indirCandidateChild contained.
// Make indirDst contained except when it is a GT_LCL_VAR or GT_CNS_INT that doesn't fit within addr base.
//
// We have already done containment analysis on the indirSrc op.
// If any of its operands are marked regOptional, reset that now.
indirSrc->AsOp()->gtOp1->ClearRegOptional(); if (GenTree::OperIsBinary(oper)) {
// On Xarch RMW operations require the source to be an immediate or in a register.
// Therefore, if we have previously marked the indirOpSource as contained while lowering
// the binary node, we need to reset that now.
if (IsContainableMemoryOp(indirOpSource)) { indirOpSource->ClearContained(); } indirSrc->AsOp()->gtOp2->ClearRegOptional(); JITDUMP("Lower successfully detected an assignment of the form: *addrMode BinOp= source\n"); } else { assert(GenTree::OperIsUnary(oper)); JITDUMP("Lower successfully detected an assignment of the form: *addrMode = UnaryOp(*addrMode)\n"); } DISPTREERANGE(BlockRange(), storeInd); indirSrc->SetContained(); indirCandidate->SetContained(); GenTree* indirCandidateChild = indirCandidate->gtGetOp1(); indirCandidateChild->SetContained(); if (indirCandidateChild->OperGet() == GT_LEA) { GenTreeAddrMode* addrMode = indirCandidateChild->AsAddrMode(); if (addrMode->HasBase()) { assert(addrMode->Base()->OperIsLeaf()); addrMode->Base()->SetContained(); } if (addrMode->HasIndex()) { assert(addrMode->Index()->OperIsLeaf()); addrMode->Index()->SetContained(); } indirDst->SetContained(); } else { assert(indirCandidateChild->OperIs(GT_LCL_VAR, GT_LCL_VAR_ADDR, GT_CLS_VAR_ADDR, GT_CNS_INT));
// If it is a GT_LCL_VAR, it still needs the reg to hold the address.
// We would still need a reg for GT_CNS_INT if it doesn't fit within addressing mode base.
// For GT_CLS_VAR_ADDR, we don't need a reg to hold the address, because the field address is known at jit time.
if (indirCandidateChild->OperIs(GT_LCL_VAR_ADDR, GT_CLS_VAR_ADDR)) { indirDst->SetContained(); } else if (indirCandidateChild->IsCnsIntOrI() && indirCandidateChild->AsIntConCommon()->FitsInAddrBase(comp)) { indirDst->SetContained(); } } return true; }
//------------------------------------------------------------------------
// ContainCheckBinary: Determine whether a binary op's operands should be contained.
// // Arguments: // node - the node we care about // void Lowering::ContainCheckBinary(GenTreeOp* node) { assert(node->OperIsBinary()); if (varTypeIsFloating(node)) { assert(node->OperIs(GT_ADD, GT_SUB)); ContainCheckFloatBinary(node); return; } GenTree* op1 = node->gtOp1; GenTree* op2 = node->gtOp2; // We can directly encode the second operand if it is either a containable constant or a memory-op. // In case of memory-op, we can encode it directly provided its type matches with 'tree' type. // This is because during codegen, type of 'tree' is used to determine emit Type size. If the types // do not match, they get normalized (i.e. sign/zero extended) on load into a register. bool directlyEncodable = false; bool binOpInRMW = false; GenTree* operand = nullptr; bool isSafeToContainOp1 = true; bool isSafeToContainOp2 = true; if (IsContainableImmed(node, op2)) { directlyEncodable = true; operand = op2; } else { binOpInRMW = IsBinOpInRMWStoreInd(node); if (!binOpInRMW) { const unsigned operatorSize = genTypeSize(node->TypeGet()); if ((genTypeSize(op2->TypeGet()) == operatorSize) && IsContainableMemoryOp(op2)) { isSafeToContainOp2 = IsSafeToContainMem(node, op2); if (isSafeToContainOp2) { directlyEncodable = true; operand = op2; } } if ((operand == nullptr) && node->OperIsCommutative()) { // If it is safe, we can reverse the order of operands of commutative operations for efficient // codegen if (IsContainableImmed(node, op1)) { directlyEncodable = true; operand = op1; } else if ((genTypeSize(op1->TypeGet()) == operatorSize) && IsContainableMemoryOp(op1)) { isSafeToContainOp1 = IsSafeToContainMem(node, op1); if (isSafeToContainOp1) { directlyEncodable = true; operand = op1; } } } } } if (directlyEncodable) { assert(operand != nullptr); MakeSrcContained(node, operand); } else if (!binOpInRMW) { // If this binary op neither has contained operands, nor is a // Read-Modify-Write (RMW) operation, we can mark its operands // as reg optional. // IsSafeToContainMem is expensive so we call it at most once for each operand // in this method. If we already called IsSafeToContainMem, it must have returned false; // otherwise, directlyEncodable would be true. isSafeToContainOp1 = isSafeToContainOp1 && IsSafeToContainMem(node, op1); isSafeToContainOp2 = isSafeToContainOp2 && IsSafeToContainMem(node, op2); SetRegOptionalForBinOp(node, isSafeToContainOp1, isSafeToContainOp2); } } //------------------------------------------------------------------------ // ContainCheckBoundsChk: determine whether any source of a bounds check node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckBoundsChk(GenTreeBoundsChk* node) { assert(node->OperIs(GT_BOUNDS_CHECK)); GenTree* other; if (CheckImmedAndMakeContained(node, node->GetIndex())) { other = node->GetArrayLength(); } else if (CheckImmedAndMakeContained(node, node->GetArrayLength())) { other = node->GetIndex(); } else if (IsContainableMemoryOp(node->GetIndex())) { other = node->GetIndex(); } else { other = node->GetArrayLength(); } if (node->GetIndex()->TypeGet() == node->GetArrayLength()->TypeGet()) { if (IsContainableMemoryOp(other) && IsSafeToContainMem(node, other)) { MakeSrcContained(node, other); } else { // We can mark 'other' as reg optional, since it is not contained. other->SetRegOptional(); } } } //------------------------------------------------------------------------ // ContainCheckIntrinsic: determine whether the source of an INTRINSIC node should be contained. 
// // Arguments: // node - pointer to the node // void Lowering::ContainCheckIntrinsic(GenTreeOp* node) { assert(node->OperIs(GT_INTRINSIC)); NamedIntrinsic intrinsicName = node->AsIntrinsic()->gtIntrinsicName; if ((intrinsicName == NI_System_Math_Ceiling) || (intrinsicName == NI_System_Math_Floor) || (intrinsicName == NI_System_Math_Truncate) || (intrinsicName == NI_System_Math_Round) || (intrinsicName == NI_System_Math_Sqrt)) { GenTree* op1 = node->gtGetOp1(); if ((IsContainableMemoryOp(op1) && IsSafeToContainMem(node, op1)) || op1->IsCnsNonZeroFltOrDbl()) { MakeSrcContained(node, op1); } else { // Mark the operand as reg optional since codegen can still // generate code if op1 is on stack. op1->SetRegOptional(); } } } #ifdef FEATURE_SIMD //---------------------------------------------------------------------------------------------- // ContainCheckSIMD: Perform containment analysis for a SIMD intrinsic node. // // Arguments: // simdNode - The SIMD intrinsic node. // void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode) { switch (simdNode->GetSIMDIntrinsicId()) { case SIMDIntrinsicInit: { GenTree* op1 = simdNode->Op(1); #ifndef TARGET_64BIT if (op1->OperGet() == GT_LONG) { MakeSrcContained(simdNode, op1); GenTree* op1lo = op1->gtGetOp1(); GenTree* op1hi = op1->gtGetOp2(); if ((op1lo->IsIntegralConst(0) && op1hi->IsIntegralConst(0)) || (op1lo->IsIntegralConst(-1) && op1hi->IsIntegralConst(-1))) { MakeSrcContained(op1, op1lo); MakeSrcContained(op1, op1hi); } } else #endif // !TARGET_64BIT if (op1->IsFPZero() || op1->IsIntegralConst(0) || (varTypeIsIntegral(simdNode->GetSimdBaseType()) && op1->IsIntegralConst(-1))) { MakeSrcContained(simdNode, op1); } else if ((comp->getSIMDSupportLevel() == SIMD_AVX2_Supported) && ((simdNode->GetSimdSize() == 16) || (simdNode->GetSimdSize() == 32))) { // Either op1 is a float or dbl constant or an addr if (op1->IsCnsFltOrDbl() || op1->OperIsLocalAddr()) { MakeSrcContained(simdNode, op1); } } } break; case SIMDIntrinsicInitArray: // We have an array and an index, which may be contained. CheckImmedAndMakeContained(simdNode, simdNode->Op(2)); break; case SIMDIntrinsicShuffleSSE2: // Second operand is an integer constant and marked as contained. assert(simdNode->Op(2)->IsCnsIntOrI()); MakeSrcContained(simdNode, simdNode->Op(2)); break; default: break; } } #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS //---------------------------------------------------------------------------------------------- // TryGetContainableHWIntrinsicOp: Tries to get a containable node for a given HWIntrinsic // // Arguments: // [In] containingNode - The hardware intrinsic node which contains 'node' // [In/Out] pNode - The node to check and potentially replace with the containable node // [Out] supportsRegOptional - On return, this will be true if 'containingNode' supports regOptional operands // otherwise, false. // [In] transparentParentNode - optional "transparent" intrinsic parent like CreateScalarUnsafe // // Return Value: // true if 'node' is a containable by containingNode; otherwise, false. // // When true is returned 'node' (and by extension the relevant op of 'containingNode') may be modified // to handle special scenarios such as CreateScalarUnsafe which exist to bridge the type system with // the actual registers. // // When false is returned 'node' is not modified. 
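// The typical payoff of containment here is folding the load into the consuming
// instruction, e.g. "vaddps ymm0, ymm1, ymmword ptr [mem]" instead of a separate
// vmovups plus a register-register add (illustrative instruction choice).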
// bool Lowering::TryGetContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode, GenTree** pNode, bool* supportsRegOptional, GenTreeHWIntrinsic* transparentParentNode) { assert(containingNode != nullptr); assert((pNode != nullptr) && (*pNode != nullptr)); assert(supportsRegOptional != nullptr); NamedIntrinsic containingIntrinsicId = containingNode->GetHWIntrinsicId(); HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(containingIntrinsicId); GenTree*& node = *pNode; // We shouldn't have called in here if containingNode doesn't support containment assert(HWIntrinsicInfo::SupportsContainment(containingIntrinsicId)); // containingNode supports nodes that read from an aligned memory address // // This will generally be an explicit LoadAligned instruction and is false for // machines with VEX support when minOpts is enabled. This is because there is // currently no way to guarantee that the address read from will always be // aligned and we want to assert that the address is aligned when optimizations // aren't enabled. However, when optimizations are enabled, we want to allow // folding of memory operands as it produces better codegen and allows simpler // coding patterns on the managed side. bool supportsAlignedSIMDLoads = false; // containingNode supports nodes that read from general memory // // We currently have to assume all "general" loads are unaligned. As such, this is // generally used to determine if we can mark the node as `regOptional` in the case // where `node` is not containable. However, this can also be used to determine whether // we can mark other types of reads as contained (such as when directly reading a local). bool supportsGeneralLoads = false; // containingNode supports nodes that read from a scalar memory address // // This will generally be an explicit LoadScalar instruction but is also used to determine // whether we can read an address of type T (we don't support this when the load would // read more than sizeof(T) bytes). bool supportsSIMDScalarLoads = false; // containingNode supports nodes that read from an unaligned memory address // // This will generally be an explicit Load instruction and is generally false for machines // without VEX support. This is because older hardware required that the SIMD operand always // be aligned to the 'natural alignment' of the type. bool supportsUnalignedSIMDLoads = false; switch (category) { case HW_Category_MemoryLoad: { supportsGeneralLoads = !node->OperIsHWIntrinsic(); break; } case HW_Category_SimpleSIMD: { switch (containingIntrinsicId) { case NI_SSE41_ConvertToVector128Int16: case NI_SSE41_ConvertToVector128Int32: case NI_SSE41_ConvertToVector128Int64: case NI_AVX2_ConvertToVector256Int16: case NI_AVX2_ConvertToVector256Int32: case NI_AVX2_ConvertToVector256Int64: { assert(!supportsSIMDScalarLoads); if (!containingNode->OperIsMemoryLoad()) { // The containable form is the one that takes a SIMD value, that may be in memory. 
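// (e.g. for the sbyte overload of ConvertToVector128Int16, a contained operand
// can be emitted as "pmovsxbw xmm0, qword ptr [mem]", which only reads half a
// vector; the instruction shown is illustrative.)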
if (!comp->canUseVexEncoding()) { supportsAlignedSIMDLoads = true; supportsUnalignedSIMDLoads = !supportsAlignedSIMDLoads; } else { supportsAlignedSIMDLoads = !comp->opts.MinOpts(); supportsUnalignedSIMDLoads = true; } // General loads are a bit special where we need at least `sizeof(simdType) / (sizeof(baseType) // * 2)` elements // For example: // * ConvertToVector128Int16 - sizeof(simdType) = 16; sizeof(baseType) = 1; expectedSize = 8 // * ConvertToVector128Int32 - sizeof(simdType) = 16; sizeof(baseType) = 1 | 2; // expectedSize = 8 | 4 // * ConvertToVector128Int64 - sizeof(simdType) = 16; sizeof(baseType) = 1 | 2 | 4; // expectedSize = 8 | 4 | 2 // * ConvertToVector256Int16 - sizeof(simdType) = 32; sizeof(baseType) = 1; expectedSize = 16 // * ConvertToVector256Int32 - sizeof(simdType) = 32; sizeof(baseType) = 1 | 2; // expectedSize = 16 | 8 // * ConvertToVector256Int64 - sizeof(simdType) = 32; sizeof(baseType) = 1 | 2 | 4; // expectedSize = 16 | 8 | 4 const unsigned sizeof_simdType = genTypeSize(containingNode->TypeGet()); const unsigned sizeof_baseType = genTypeSize(containingNode->GetSimdBaseType()); assert((sizeof_simdType == 16) || (sizeof_simdType == 32)); assert((sizeof_baseType == 1) || (sizeof_baseType == 2) || (sizeof_baseType == 4)); const unsigned expectedSize = sizeof_simdType / (sizeof_baseType * 2); const unsigned operandSize = genTypeSize(node->TypeGet()); assert((sizeof_simdType != 16) || (expectedSize == 8) || (expectedSize == 4) || (expectedSize == 2)); assert((sizeof_simdType != 32) || (expectedSize == 16) || (expectedSize == 8) || (expectedSize == 4)); supportsGeneralLoads = (operandSize >= expectedSize); } else { // The memory form of this already takes a pointer and should be treated like a MemoryLoad supportsGeneralLoads = !node->OperIsHWIntrinsic(); } break; } case NI_SSE2_ConvertToVector128Double: case NI_SSE3_MoveAndDuplicate: case NI_AVX_ConvertToVector256Double: { assert(!supportsSIMDScalarLoads); // Most instructions under the non-VEX encoding require aligned operands. 
// Those used for Sse2.ConvertToVector128Double (CVTDQ2PD and CVTPS2PD) // and Sse3.MoveAndDuplicate (MOVDDUP) are exceptions and don't fail for // unaligned inputs as they read mem64 (half the vector width) instead supportsAlignedSIMDLoads = !comp->opts.MinOpts(); supportsUnalignedSIMDLoads = true; const unsigned expectedSize = genTypeSize(containingNode->TypeGet()) / 2; const unsigned operandSize = genTypeSize(node->TypeGet()); supportsGeneralLoads = supportsUnalignedSIMDLoads && (operandSize >= expectedSize); break; } default: { assert(!supportsSIMDScalarLoads); if (!comp->canUseVexEncoding()) { assert(!supportsUnalignedSIMDLoads); supportsAlignedSIMDLoads = true; } else { supportsAlignedSIMDLoads = !comp->opts.MinOpts(); supportsUnalignedSIMDLoads = true; } const unsigned expectedSize = genTypeSize(containingNode->TypeGet()); const unsigned operandSize = genTypeSize(node->TypeGet()); supportsGeneralLoads = supportsUnalignedSIMDLoads && (operandSize >= expectedSize); break; } } assert(supportsSIMDScalarLoads == false); break; } case HW_Category_IMM: { switch (containingIntrinsicId) { case NI_SSE_Shuffle: case NI_SSE2_ShiftLeftLogical: case NI_SSE2_ShiftRightArithmetic: case NI_SSE2_ShiftRightLogical: case NI_SSE2_Shuffle: case NI_SSE2_ShuffleHigh: case NI_SSE2_ShuffleLow: case NI_SSSE3_AlignRight: case NI_SSE41_Blend: case NI_SSE41_DotProduct: case NI_SSE41_MultipleSumAbsoluteDifferences: case NI_AES_KeygenAssist: case NI_PCLMULQDQ_CarrylessMultiply: case NI_AVX_Blend: case NI_AVX_Compare: case NI_AVX_DotProduct: case NI_AVX_Permute: case NI_AVX_Permute2x128: case NI_AVX2_Blend: case NI_AVX2_MultipleSumAbsoluteDifferences: case NI_AVX2_Permute2x128: case NI_AVX2_Permute4x64: case NI_AVX2_ShiftLeftLogical: case NI_AVX2_ShiftRightArithmetic: case NI_AVX2_ShiftRightLogical: case NI_AVX2_ShuffleHigh: case NI_AVX2_ShuffleLow: { assert(!supportsSIMDScalarLoads); const unsigned expectedSize = genTypeSize(containingNode->GetSimdBaseType()); const unsigned operandSize = genTypeSize(node->TypeGet()); supportsAlignedSIMDLoads = !comp->canUseVexEncoding() || !comp->opts.MinOpts(); supportsUnalignedSIMDLoads = comp->canUseVexEncoding(); supportsGeneralLoads = supportsUnalignedSIMDLoads && (operandSize >= expectedSize); break; } case NI_AVX_InsertVector128: case NI_AVX2_InsertVector128: { // InsertVector128 is special in that that it returns a TYP_SIMD32 but takes a TYP_SIMD16 assert(!supportsSIMDScalarLoads); const unsigned expectedSize = 16; const unsigned operandSize = genTypeSize(node->TypeGet()); supportsAlignedSIMDLoads = !comp->canUseVexEncoding() || !comp->opts.MinOpts(); supportsUnalignedSIMDLoads = comp->canUseVexEncoding(); supportsGeneralLoads = supportsUnalignedSIMDLoads && (operandSize >= expectedSize); break; } case NI_SSE2_Insert: case NI_SSE41_Insert: case NI_SSE41_X64_Insert: { assert(supportsAlignedSIMDLoads == false); assert(supportsUnalignedSIMDLoads == false); if (containingNode->GetSimdBaseType() == TYP_FLOAT) { assert(containingIntrinsicId == NI_SSE41_Insert); // Sse41.Insert(V128<float>, V128<float>, byte) is a bit special // in that it has different behavior depending on whether the // second operand is coming from a register or memory. When coming // from a register, all 4 elements of the vector can be used and it // is effectively a regular `SimpleSIMD` operation; but when loading // from memory, it only works with the lowest element and is effectively // a `SIMDScalar`. 
assert(supportsGeneralLoads == false); assert(supportsSIMDScalarLoads == false); GenTree* op1 = containingNode->Op(1); GenTree* op2 = containingNode->Op(2); GenTree* op3 = containingNode->Op(3); // The upper two bits of the immediate value are ignored if // op2 comes from memory. In order to support using the upper // bits, we need to disable containment support if op3 is not // constant or if the constant is greater than 0x3F (which means // at least one of the upper two bits is set). if (op3->IsCnsIntOrI()) { ssize_t ival = op3->AsIntCon()->IconValue(); assert((ival >= 0) && (ival <= 255)); supportsSIMDScalarLoads = (ival <= 0x3F); supportsGeneralLoads = supportsSIMDScalarLoads; } break; } // We should only get here for integral nodes. assert(varTypeIsIntegral(node->TypeGet())); assert(supportsSIMDScalarLoads == false); const unsigned expectedSize = genTypeSize(containingNode->GetSimdBaseType()); const unsigned operandSize = genTypeSize(node->TypeGet()); supportsGeneralLoads = (operandSize >= expectedSize); break; } case NI_AVX_CompareScalar: { assert(supportsAlignedSIMDLoads == false); assert(supportsUnalignedSIMDLoads == false); supportsSIMDScalarLoads = true; supportsGeneralLoads = supportsSIMDScalarLoads; break; } default: { assert(supportsAlignedSIMDLoads == false); assert(supportsGeneralLoads == false); assert(supportsSIMDScalarLoads == false); assert(supportsUnalignedSIMDLoads == false); break; } } break; } case HW_Category_SIMDScalar: { assert(supportsAlignedSIMDLoads == false); assert(supportsUnalignedSIMDLoads == false); switch (containingIntrinsicId) { case NI_Vector128_CreateScalarUnsafe: case NI_Vector256_CreateScalarUnsafe: { if (!varTypeIsIntegral(node->TypeGet())) { // The floating-point overload doesn't require any special semantics supportsSIMDScalarLoads = true; supportsGeneralLoads = supportsSIMDScalarLoads; break; } // The integral overloads only take GPR/mem assert(supportsSIMDScalarLoads == false); const unsigned expectedSize = genTypeSize(genActualType(containingNode->GetSimdBaseType())); const unsigned operandSize = genTypeSize(node->TypeGet()); supportsGeneralLoads = (operandSize >= expectedSize); break; } case NI_AVX2_BroadcastScalarToVector128: case NI_AVX2_BroadcastScalarToVector256: { if (!containingNode->OperIsMemoryLoad()) { // The containable form is the one that takes a SIMD value, that may be in memory. 
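// (e.g. a contained scalar float operand can be folded into
// "vbroadcastss xmm0, dword ptr [mem]"; illustrative form.)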
supportsSIMDScalarLoads = true; supportsGeneralLoads = supportsSIMDScalarLoads; } else { // The memory form of this already takes a pointer and should be treated like a MemoryLoad supportsGeneralLoads = !node->OperIsHWIntrinsic(); } break; } case NI_SSE_ConvertScalarToVector128Single: case NI_SSE2_ConvertScalarToVector128Double: case NI_SSE2_ConvertScalarToVector128Int32: case NI_SSE2_ConvertScalarToVector128UInt32: case NI_SSE_X64_ConvertScalarToVector128Single: case NI_SSE2_X64_ConvertScalarToVector128Double: case NI_SSE2_X64_ConvertScalarToVector128Int64: case NI_SSE2_X64_ConvertScalarToVector128UInt64: { if (!varTypeIsIntegral(node->TypeGet())) { // The floating-point overload doesn't require any special semantics assert(containingIntrinsicId == NI_SSE2_ConvertScalarToVector128Double); supportsSIMDScalarLoads = true; supportsGeneralLoads = supportsSIMDScalarLoads; break; } // The integral overloads only take GPR/mem assert(supportsSIMDScalarLoads == false); const unsigned expectedSize = genTypeSize(genActualType(containingNode->GetSimdBaseType())); const unsigned operandSize = genTypeSize(node->TypeGet()); supportsGeneralLoads = (operandSize >= expectedSize); break; } default: { supportsSIMDScalarLoads = true; supportsGeneralLoads = supportsSIMDScalarLoads; break; } } break; } case HW_Category_Scalar: { // We should only get here for integral nodes. assert(varTypeIsIntegral(node->TypeGet())); assert(supportsAlignedSIMDLoads == false); assert(supportsUnalignedSIMDLoads == false); assert(supportsSIMDScalarLoads == false); unsigned expectedSize = genTypeSize(containingNode->TypeGet()); const unsigned operandSize = genTypeSize(node->TypeGet()); // CRC32 codegen depends on its second oprand's type. // Currently, we are using SIMDBaseType to store the op2Type info. if (containingIntrinsicId == NI_SSE42_Crc32) { var_types op2Type = containingNode->GetSimdBaseType(); expectedSize = genTypeSize(op2Type); } supportsGeneralLoads = (operandSize >= expectedSize); break; } default: { assert(supportsAlignedSIMDLoads == false); assert(supportsGeneralLoads == false); assert(supportsSIMDScalarLoads == false); assert(supportsUnalignedSIMDLoads == false); break; } } *supportsRegOptional = supportsGeneralLoads; if (!node->OperIsHWIntrinsic()) { bool canBeContained = false; if (supportsGeneralLoads) { if (IsContainableMemoryOp(node)) { // Code motion safety checks // if (transparentParentNode != nullptr) { canBeContained = IsSafeToContainMem(containingNode, transparentParentNode, node); } else { canBeContained = IsSafeToContainMem(containingNode, node); } } else if (node->IsCnsNonZeroFltOrDbl()) { // Always safe. // canBeContained = true; } } return canBeContained; } // TODO-XArch: Update this to be table driven, if possible. GenTreeHWIntrinsic* hwintrinsic = node->AsHWIntrinsic(); NamedIntrinsic intrinsicId = hwintrinsic->GetHWIntrinsicId(); switch (intrinsicId) { case NI_Vector128_CreateScalarUnsafe: case NI_Vector256_CreateScalarUnsafe: { if (!supportsSIMDScalarLoads) { return false; } GenTree* op1 = hwintrinsic->Op(1); bool op1SupportsRegOptional = false; if (!TryGetContainableHWIntrinsicOp(containingNode, &op1, &op1SupportsRegOptional, hwintrinsic)) { return false; } LIR::Use use; if (!BlockRange().TryGetUse(node, &use) || (use.User() != containingNode)) { return false; } // We have CreateScalarUnsafe where the underlying scalar is directly containable // by containingNode. As such, we'll just remove CreateScalarUnsafe and consume // the value directly. 
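// (e.g. Sse.AddScalar(x, Vector128.CreateScalarUnsafe(f)) with f living on the
// stack can then emit "addss xmm0, dword ptr [mem]" directly; the names and
// encoding here are illustrative.)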
use.ReplaceWith(op1); BlockRange().Remove(node); node = op1; node->ClearContained(); return true; } case NI_SSE_LoadAlignedVector128: case NI_SSE2_LoadAlignedVector128: case NI_AVX_LoadAlignedVector256: { return supportsAlignedSIMDLoads; } case NI_SSE_LoadScalarVector128: case NI_SSE2_LoadScalarVector128: { return supportsSIMDScalarLoads; } case NI_SSE_LoadVector128: case NI_SSE2_LoadVector128: case NI_AVX_LoadVector256: { return supportsUnalignedSIMDLoads; } case NI_AVX_ExtractVector128: case NI_AVX2_ExtractVector128: { return false; } default: { assert(!node->isContainableHWIntrinsic()); return false; } } } //---------------------------------------------------------------------------------------------- // ContainCheckHWIntrinsicAddr: Perform containment analysis for an address operand of a hardware // intrinsic node. // // Arguments: // node - The hardware intrinsic node // addr - The address node to try contain // void Lowering::ContainCheckHWIntrinsicAddr(GenTreeHWIntrinsic* node, GenTree* addr) { assert((addr->TypeGet() == TYP_I_IMPL) || (addr->TypeGet() == TYP_BYREF)); TryCreateAddrMode(addr, true, node); if ((addr->OperIs(GT_CLS_VAR_ADDR, GT_LCL_VAR_ADDR, GT_LCL_FLD_ADDR, GT_LEA) || (addr->IsCnsIntOrI() && addr->AsIntConCommon()->FitsInAddrBase(comp))) && IsSafeToContainMem(node, addr)) { MakeSrcContained(node, addr); } } //---------------------------------------------------------------------------------------------- // ContainCheckHWIntrinsic: Perform containment analysis for a hardware intrinsic node. // // Arguments: // node - The hardware intrinsic node. // void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId); size_t numArgs = node->GetOperandCount(); CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); if (!HWIntrinsicInfo::SupportsContainment(intrinsicId)) { // AVX2 gather are not containable and always have constant IMM argument if (HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsicId)) { GenTree* lastOp = node->Op(numArgs); MakeSrcContained(node, lastOp); } // Exit early if containment isn't supported return; } if (HWIntrinsicInfo::lookupCategory(intrinsicId) == HW_Category_IMM) { GenTree* lastOp = node->Op(numArgs); if (HWIntrinsicInfo::isImmOp(intrinsicId, lastOp) && lastOp->IsCnsIntOrI()) { MakeSrcContained(node, lastOp); } } if ((node->GetSimdSize() == 8) || (node->GetSimdSize() == 12)) { // We want to handle GetElement still for Vector2/3 if ((intrinsicId != NI_Vector128_GetElement) && (intrinsicId != NI_Vector256_GetElement)) { // TODO-XArch-CQ: Ideally we would key this off of the size containingNode // expects vs the size node actually is or would be if spilled to the stack return; } } // TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained const bool isCommutative = HWIntrinsicInfo::IsCommutative(intrinsicId); GenTree* op1 = nullptr; GenTree* op2 = nullptr; GenTree* op3 = nullptr; if (numArgs == 1) { // One argument intrinsics cannot be commutative assert(!isCommutative); op1 = node->Op(1); switch (category) { case HW_Category_MemoryLoad: ContainCheckHWIntrinsicAddr(node, op1); break; case HW_Category_SimpleSIMD: case HW_Category_SIMDScalar: case HW_Category_Scalar: { switch (intrinsicId) { case NI_SSE_ReciprocalScalar: case NI_SSE_ReciprocalSqrtScalar: case NI_SSE_SqrtScalar: case NI_SSE2_SqrtScalar: case NI_SSE41_CeilingScalar: case 
NI_SSE41_FloorScalar: case NI_SSE41_RoundCurrentDirectionScalar: case NI_SSE41_RoundToNearestIntegerScalar: case NI_SSE41_RoundToNegativeInfinityScalar: case NI_SSE41_RoundToPositiveInfinityScalar: case NI_SSE41_RoundToZeroScalar: { // These intrinsics have both 1 and 2-operand overloads. // // The 1-operand overload basically does `intrinsic(op1, op1)` // // Because of this, the operand must be loaded into a register // and cannot be contained. return; } case NI_SSE2_ConvertToInt32: case NI_SSE2_X64_ConvertToInt64: case NI_SSE2_ConvertToUInt32: case NI_SSE2_X64_ConvertToUInt64: case NI_AVX2_ConvertToInt32: case NI_AVX2_ConvertToUInt32: { if (varTypeIsIntegral(simdBaseType)) { // TODO-XARCH-CQ: These intrinsics are "ins reg/mem, xmm" and don't // currently support containment. return; } break; } case NI_SSE41_ConvertToVector128Int16: case NI_SSE41_ConvertToVector128Int32: case NI_SSE41_ConvertToVector128Int64: case NI_AVX2_ConvertToVector256Int16: case NI_AVX2_ConvertToVector256Int32: case NI_AVX2_ConvertToVector256Int64: if (!varTypeIsSIMD(op1)) { ContainCheckHWIntrinsicAddr(node, op1); return; } break; default: { break; } } bool supportsRegOptional = false; if (node->OperIsMemoryLoad()) { // We have a few cases that can be potential memory loads assert((intrinsicId == NI_SSE41_ConvertToVector128Int16) || (intrinsicId == NI_SSE41_ConvertToVector128Int32) || (intrinsicId == NI_SSE41_ConvertToVector128Int64) || (intrinsicId == NI_AVX2_BroadcastScalarToVector128) || (intrinsicId == NI_AVX2_BroadcastScalarToVector256) || (intrinsicId == NI_AVX2_ConvertToVector256Int16) || (intrinsicId == NI_AVX2_ConvertToVector256Int32) || (intrinsicId == NI_AVX2_ConvertToVector256Int64)); ContainCheckHWIntrinsicAddr(node, op1); } else if (TryGetContainableHWIntrinsicOp(node, &op1, &supportsRegOptional)) { MakeSrcContained(node, op1); } else if (supportsRegOptional) { op1->SetRegOptional(); } break; } default: { unreached(); break; } } } else { if (numArgs == 2) { op1 = node->Op(1); op2 = node->Op(2); switch (category) { case HW_Category_MemoryLoad: if ((intrinsicId == NI_AVX_MaskLoad) || (intrinsicId == NI_AVX2_MaskLoad)) { ContainCheckHWIntrinsicAddr(node, op1); } else { ContainCheckHWIntrinsicAddr(node, op2); } break; case HW_Category_MemoryStore: ContainCheckHWIntrinsicAddr(node, op1); if (((intrinsicId == NI_SSE_Store) || (intrinsicId == NI_SSE2_Store)) && op2->OperIsHWIntrinsic() && ((op2->AsHWIntrinsic()->GetHWIntrinsicId() == NI_AVX_ExtractVector128) || (op2->AsHWIntrinsic()->GetHWIntrinsicId() == NI_AVX2_ExtractVector128)) && op2->gtGetOp2()->IsIntegralConst()) { MakeSrcContained(node, op2); } break; case HW_Category_SimpleSIMD: case HW_Category_SIMDScalar: case HW_Category_Scalar: { bool supportsRegOptional = false; if (TryGetContainableHWIntrinsicOp(node, &op2, &supportsRegOptional)) { MakeSrcContained(node, op2); } else if ((isCommutative || (intrinsicId == NI_BMI2_MultiplyNoFlags) || (intrinsicId == NI_BMI2_X64_MultiplyNoFlags)) && TryGetContainableHWIntrinsicOp(node, &op1, &supportsRegOptional)) { MakeSrcContained(node, op1); // Swap the operands here to make the containment checks in codegen significantly simpler node->Op(1) = op2; node->Op(2) = op1; } else if (supportsRegOptional) { op2->SetRegOptional(); // TODO-XArch-CQ: For commutative nodes, either operand can be reg-optional. 
// https://github.com/dotnet/runtime/issues/6358 } break; } case HW_Category_IMM: { // We don't currently have any IMM intrinsics which are also commutative assert(!isCommutative); bool supportsRegOptional = false; switch (intrinsicId) { case NI_SSE2_Extract: case NI_AVX_ExtractVector128: case NI_AVX2_ExtractVector128: { // TODO-XARCH-CQ: These intrinsics are "ins reg/mem, xmm, imm8" and don't // currently support containment. break; } case NI_SSE2_ShiftLeftLogical: case NI_SSE2_ShiftRightArithmetic: case NI_SSE2_ShiftRightLogical: case NI_AVX2_ShiftLeftLogical: case NI_AVX2_ShiftRightArithmetic: case NI_AVX2_ShiftRightLogical: { // These intrinsics can have op2 be imm or reg/mem if (!HWIntrinsicInfo::isImmOp(intrinsicId, op2)) { if (TryGetContainableHWIntrinsicOp(node, &op2, &supportsRegOptional)) { MakeSrcContained(node, op2); } else if (supportsRegOptional) { op2->SetRegOptional(); } } break; } case NI_SSE2_Shuffle: case NI_SSE2_ShuffleHigh: case NI_SSE2_ShuffleLow: case NI_AVX2_Permute4x64: case NI_AVX2_Shuffle: case NI_AVX2_ShuffleHigh: case NI_AVX2_ShuffleLow: { // These intrinsics have op2 as an imm and op1 as a reg/mem if (TryGetContainableHWIntrinsicOp(node, &op1, &supportsRegOptional)) { MakeSrcContained(node, op1); } else if (supportsRegOptional) { op1->SetRegOptional(); } break; } case NI_SSE41_Extract: case NI_SSE41_X64_Extract: { assert(!varTypeIsFloating(simdBaseType)); // TODO-XARCH-CQ: These intrinsics are "ins reg/mem, xmm, imm8" and don't // currently support containment. break; } case NI_AVX_Permute: { // These intrinsics can have op2 be imm or reg/mem // They also can have op1 be reg/mem and op2 be imm if (HWIntrinsicInfo::isImmOp(intrinsicId, op2)) { if (TryGetContainableHWIntrinsicOp(node, &op1, &supportsRegOptional)) { MakeSrcContained(node, op1); } else if (supportsRegOptional) { op1->SetRegOptional(); } } else if (TryGetContainableHWIntrinsicOp(node, &op2, &supportsRegOptional)) { MakeSrcContained(node, op2); } else if (supportsRegOptional) { op2->SetRegOptional(); } break; } case NI_AES_KeygenAssist: { if (TryGetContainableHWIntrinsicOp(node, &op1, &supportsRegOptional)) { MakeSrcContained(node, op1); } else if (supportsRegOptional) { op1->SetRegOptional(); } break; } case NI_SSE2_ShiftLeftLogical128BitLane: case NI_SSE2_ShiftRightLogical128BitLane: case NI_AVX2_ShiftLeftLogical128BitLane: case NI_AVX2_ShiftRightLogical128BitLane: { #if DEBUG // These intrinsics should have been marked contained by the general-purpose handling // earlier in the method. 
GenTree* lastOp = node->Op(numArgs); if (HWIntrinsicInfo::isImmOp(intrinsicId, lastOp) && lastOp->IsCnsIntOrI()) { assert(lastOp->isContained()); } #endif break; } default: { assert(!"Unhandled containment for binary hardware intrinsic with immediate operand"); break; } } break; } case HW_Category_Helper: { // We don't currently have any IMM intrinsics which are also commutative assert(!isCommutative); switch (intrinsicId) { case NI_Vector128_GetElement: case NI_Vector256_GetElement: { if (op1->OperIs(GT_IND)) { assert((op1->gtFlags & GTF_IND_REQ_ADDR_IN_REG) != 0); op1->AsIndir()->Addr()->ClearContained(); } if (op2->OperIsConst()) { MakeSrcContained(node, op2); } if (IsContainableMemoryOp(op1) && IsSafeToContainMem(node, op1)) { MakeSrcContained(node, op1); if (op1->OperIs(GT_IND)) { op1->AsIndir()->Addr()->ClearContained(); } } break; } default: { assert(!"Unhandled containment for helper binary hardware intrinsic"); break; } } break; } default: { unreached(); break; } } } else if (numArgs == 3) { // three argument intrinsics should not be marked commutative assert(!isCommutative); op1 = node->Op(1); op2 = node->Op(2); op3 = node->Op(3); switch (category) { case HW_Category_MemoryStore: ContainCheckHWIntrinsicAddr(node, op1); break; case HW_Category_SimpleSIMD: case HW_Category_SIMDScalar: case HW_Category_Scalar: { if ((intrinsicId >= NI_FMA_MultiplyAdd) && (intrinsicId <= NI_FMA_MultiplySubtractNegatedScalar)) { bool supportsOp1RegOptional = false; bool supportsOp2RegOptional = false; bool supportsOp3RegOptional = false; unsigned resultOpNum = 0; LIR::Use use; GenTree* user = nullptr; if (BlockRange().TryGetUse(node, &use)) { user = use.User(); } resultOpNum = node->GetResultOpNumForFMA(user, op1, op2, op3); // Prioritize Containable op. Check if any one of the op is containable first. // Set op regOptional only if none of them is containable. 
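// (FMA has 132/213/231 forms, so whichever source ends up contained can take the
// memory-operand slot, e.g. "vfmadd231ps xmm0, xmm1, xmmword ptr [mem]";
// illustrative encoding.)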
// Prefer to make op3 contained, if (resultOpNum != 3 && TryGetContainableHWIntrinsicOp(node, &op3, &supportsOp3RegOptional)) { // result = (op1 * op2) + [op3] MakeSrcContained(node, op3); } else if (resultOpNum != 2 && TryGetContainableHWIntrinsicOp(node, &op2, &supportsOp2RegOptional)) { // result = (op1 * [op2]) + op3 MakeSrcContained(node, op2); } else if (resultOpNum != 1 && !HWIntrinsicInfo::CopiesUpperBits(intrinsicId) && TryGetContainableHWIntrinsicOp(node, &op1, &supportsOp1RegOptional)) { // result = ([op1] * op2) + op3 MakeSrcContained(node, op1); } else if (supportsOp3RegOptional) { assert(resultOpNum != 3); op3->SetRegOptional(); } else if (supportsOp2RegOptional) { assert(resultOpNum != 2); op2->SetRegOptional(); } else if (supportsOp1RegOptional) { op1->SetRegOptional(); } } else { bool supportsRegOptional = false; switch (intrinsicId) { case NI_SSE41_BlendVariable: case NI_AVX_BlendVariable: case NI_AVX2_BlendVariable: { if (TryGetContainableHWIntrinsicOp(node, &op2, &supportsRegOptional)) { MakeSrcContained(node, op2); } else if (supportsRegOptional) { op2->SetRegOptional(); } break; } case NI_AVXVNNI_MultiplyWideningAndAdd: case NI_AVXVNNI_MultiplyWideningAndAddSaturate: { if (TryGetContainableHWIntrinsicOp(node, &op3, &supportsRegOptional)) { MakeSrcContained(node, op3); } else if (supportsRegOptional) { op3->SetRegOptional(); } break; } case NI_BMI2_MultiplyNoFlags: case NI_BMI2_X64_MultiplyNoFlags: { if (TryGetContainableHWIntrinsicOp(node, &op2, &supportsRegOptional)) { MakeSrcContained(node, op2); } else if (TryGetContainableHWIntrinsicOp(node, &op1, &supportsRegOptional)) { MakeSrcContained(node, op1); // MultiplyNoFlags is a Commutative operation, so swap the first two operands here // to make the containment checks in codegen significantly simpler node->Op(1) = op2; node->Op(2) = op1; } else if (supportsRegOptional) { op2->SetRegOptional(); } break; } default: { unreached(); break; } } } break; } case HW_Category_IMM: { bool supportsRegOptional = false; switch (intrinsicId) { case NI_SSE_Shuffle: case NI_SSE2_Insert: case NI_SSE2_Shuffle: case NI_SSSE3_AlignRight: case NI_SSE41_Blend: case NI_SSE41_DotProduct: case NI_SSE41_Insert: case NI_SSE41_X64_Insert: case NI_SSE41_MultipleSumAbsoluteDifferences: case NI_AVX_Blend: case NI_AVX_Compare: case NI_AVX_CompareScalar: case NI_AVX_DotProduct: case NI_AVX_InsertVector128: case NI_AVX_Permute2x128: case NI_AVX_Shuffle: case NI_AVX2_AlignRight: case NI_AVX2_Blend: case NI_AVX2_InsertVector128: case NI_AVX2_MultipleSumAbsoluteDifferences: case NI_AVX2_Permute2x128: case NI_PCLMULQDQ_CarrylessMultiply: { if (TryGetContainableHWIntrinsicOp(node, &op2, &supportsRegOptional)) { MakeSrcContained(node, op2); } else if (supportsRegOptional) { op2->SetRegOptional(); } break; } default: { assert(!"Unhandled containment for ternary hardware intrinsic with immediate operand"); break; } } break; } default: { unreached(); break; } } } else { unreached(); } } } #endif // FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // ContainCheckFloatBinary: determine whether the sources of a floating point binary node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckFloatBinary(GenTreeOp* node) { assert(node->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_DIV) && varTypeIsFloating(node)); // overflow operations aren't supported on float/double types. 
assert(!node->gtOverflowEx()); GenTree* op1 = node->gtGetOp1(); GenTree* op2 = node->gtGetOp2(); // No implicit conversions at this stage as the expectation is that // everything is made explicit by adding casts. assert(op1->TypeGet() == op2->TypeGet()); bool isSafeToContainOp1 = true; bool isSafeToContainOp2 = true; if (op2->IsCnsNonZeroFltOrDbl()) { MakeSrcContained(node, op2); } else if (IsContainableMemoryOp(op2)) { isSafeToContainOp2 = IsSafeToContainMem(node, op2); if (isSafeToContainOp2) { MakeSrcContained(node, op2); } } if (!op2->isContained() && node->OperIsCommutative()) { // Though we have GT_ADD(op1=memOp, op2=non-memOp, we try to reorder the operands // as long as it is safe so that the following efficient code sequence is generated: // addss/sd targetReg, memOp (if op1Reg == targetReg) OR // movaps targetReg, op2Reg; addss/sd targetReg, [memOp] // // Instead of // movss op1Reg, [memOp]; addss/sd targetReg, Op2Reg (if op1Reg == targetReg) OR // movss op1Reg, [memOp]; movaps targetReg, op1Reg, addss/sd targetReg, Op2Reg if (op1->IsCnsNonZeroFltOrDbl()) { MakeSrcContained(node, op1); } else if (IsContainableMemoryOp(op1)) { isSafeToContainOp1 = IsSafeToContainMem(node, op1); if (isSafeToContainOp1) { MakeSrcContained(node, op1); } } } if (!op1->isContained() && !op2->isContained()) { // If there are no containable operands, we can make an operand reg optional. // IsSafeToContainMem is expensive so we call it at most once for each operand // in this method. If we already called IsSafeToContainMem, it must have returned false; // otherwise, the corresponding operand (op1 or op2) would be contained. isSafeToContainOp1 = isSafeToContainOp1 && IsSafeToContainMem(node, op1); isSafeToContainOp2 = isSafeToContainOp2 && IsSafeToContainMem(node, op2); SetRegOptionalForBinOp(node, isSafeToContainOp1, isSafeToContainOp2); } } #endif // TARGET_XARCH
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Lowering for AMD64, x86 XX XX XX XX This encapsulates all the logic for lowering trees for the AMD64 XX XX architecture. For a more detailed view of what is lowering, please XX XX take a look at Lower.cpp XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #ifdef TARGET_XARCH // This file is only used for xarch #include "jit.h" #include "sideeffects.h" #include "lower.h" // xarch supports both ROL and ROR instructions so no lowering is required. void Lowering::LowerRotate(GenTree* tree) { ContainCheckShiftRotate(tree->AsOp()); } //------------------------------------------------------------------------ // LowerStoreLoc: Lower a store of a lclVar // // Arguments: // storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR) // // Notes: // This involves: // - Handling of contained immediates. // - Widening operations of unsigneds. void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc) { // Try to widen the ops if they are going into a local var. if ((storeLoc->gtOper == GT_STORE_LCL_VAR) && (storeLoc->gtOp1->gtOper == GT_CNS_INT)) { GenTreeIntCon* con = storeLoc->gtOp1->AsIntCon(); ssize_t ival = con->gtIconVal; LclVarDsc* varDsc = comp->lvaGetDesc(storeLoc); if (varDsc->lvIsSIMDType()) { noway_assert(storeLoc->gtType != TYP_STRUCT); } unsigned size = genTypeSize(storeLoc); // If we are storing a constant into a local variable // we extend the size of the store here if ((size < 4) && !varTypeIsStruct(varDsc)) { if (!varTypeIsUnsigned(varDsc)) { if (genTypeSize(storeLoc) == 1) { if ((ival & 0x7f) != ival) { ival = ival | 0xffffff00; } } else { assert(genTypeSize(storeLoc) == 2); if ((ival & 0x7fff) != ival) { ival = ival | 0xffff0000; } } } // A local stack slot is at least 4 bytes in size, regardless of // what the local var is typed as, so auto-promote it here // unless it is a field of a promoted struct // TODO-XArch-CQ: if the field is promoted shouldn't we also be able to do this? if (!varDsc->lvIsStructField) { storeLoc->gtType = TYP_INT; con->SetIconValue(ival); } } } if (storeLoc->OperIs(GT_STORE_LCL_FLD)) { // We should only encounter this for lclVars that are lvDoNotEnregister. verifyLclFldDoNotEnregister(storeLoc->GetLclNum()); } ContainCheckStoreLoc(storeLoc); } //------------------------------------------------------------------------ // LowerStoreIndir: Determine addressing mode for an indirection, and whether operands are contained. // // Arguments: // node - The indirect store node (GT_STORE_IND) of interest // // Return Value: // None. // void Lowering::LowerStoreIndir(GenTreeStoreInd* node) { // Mark all GT_STOREIND nodes to indicate that it is not known // whether it represents a RMW memory op. node->SetRMWStatusDefault(); if (!varTypeIsFloating(node)) { // Perform recognition of trees with the following structure: // StoreInd(addr, BinOp(expr, GT_IND(addr))) // to be able to fold this into an instruction of the form // BINOP [addr], register // where register is the actual place where 'expr' is computed. // // SSE2 doesn't support RMW form of instructions. 
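// For example (tree shape and encoding are illustrative):
//     STOREIND(addr, ADD(IND(addr), CNS_INT 1))
// can be emitted as
//     add dword ptr [addr], 1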
if (LowerRMWMemOp(node)) { return; } } // Optimization: do not unnecessarily zero-extend the result of setcc. if (varTypeIsByte(node) && (node->Data()->OperIsCompare() || node->Data()->OperIs(GT_SETCC))) { node->Data()->ChangeType(TYP_BYTE); } ContainCheckStoreIndir(node); } //------------------------------------------------------------------------ // LowerMul: Lower a GT_MUL/GT_MULHI/GT_MUL_LONG node. // // Currently only performs containment checks. // // Arguments: // mul - The node to lower // // Return Value: // The next node to lower. // GenTree* Lowering::LowerMul(GenTreeOp* mul) { assert(mul->OperIsMul()); ContainCheckMul(mul); return mul->gtNext; } //------------------------------------------------------------------------ // LowerBinaryArithmetic: lowers the given binary arithmetic node. // // Recognizes opportunities for using target-independent "combined" nodes // Performs containment checks. // // Arguments: // node - the arithmetic node to lower // // Returns: // The next node to lower. // GenTree* Lowering::LowerBinaryArithmetic(GenTreeOp* binOp) { #ifdef FEATURE_HW_INTRINSICS if (comp->opts.OptimizationEnabled() && binOp->OperIs(GT_AND) && varTypeIsIntegral(binOp)) { GenTree* replacementNode = TryLowerAndOpToAndNot(binOp); if (replacementNode != nullptr) { return replacementNode->gtNext; } replacementNode = TryLowerAndOpToResetLowestSetBit(binOp); if (replacementNode != nullptr) { return replacementNode->gtNext; } replacementNode = TryLowerAndOpToExtractLowestSetBit(binOp); if (replacementNode != nullptr) { return replacementNode->gtNext; } } #endif ContainCheckBinary(binOp); return binOp->gtNext; } //------------------------------------------------------------------------ // LowerBlockStore: Lower a block store node // // Arguments: // blkNode - The block store node to lower // void Lowering::LowerBlockStore(GenTreeBlk* blkNode) { TryCreateAddrMode(blkNode->Addr(), false, blkNode); GenTree* dstAddr = blkNode->Addr(); GenTree* src = blkNode->Data(); unsigned size = blkNode->Size(); if (blkNode->OperIsInitBlkOp()) { if (src->OperIs(GT_INIT_VAL)) { src->SetContained(); src = src->AsUnOp()->gtGetOp1(); } if (blkNode->OperIs(GT_STORE_OBJ)) { blkNode->SetOper(GT_STORE_BLK); } if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (size <= INITBLK_UNROLL_LIMIT)) { if (!src->OperIs(GT_CNS_INT)) { // TODO-CQ: We could unroll even when the initialization value is not a constant // by inserting a MUL init, 0x01010101 instruction. We need to determine if the // extra latency that MUL introduces isn't worse that rep stosb. Likely not. blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindRepInstr; } else { blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll; // The fill value of an initblk is interpreted to hold a // value of (unsigned int8) however a constant of any size // may practically reside on the evaluation stack. So extract // the lower byte out of the initVal constant and replicate // it to a larger constant whose size is sufficient to support // the largest width store of the desired inline expansion. 
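// For example, an init value of 0xAB is widened to 0xABABABAB (or
// 0xABABABABABABABAB for 8-byte stores) so that each wide move writes the
// intended byte pattern.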
ssize_t fill = src->AsIntCon()->IconValue() & 0xFF; if (fill == 0) { if (size >= XMM_REGSIZE_BYTES) { const bool canUse16BytesSimdMov = !blkNode->IsOnHeapAndContainsReferences(); #ifdef TARGET_AMD64 const bool willUseOnlySimdMov = canUse16BytesSimdMov && (size % XMM_REGSIZE_BYTES == 0); #else const bool willUseOnlySimdMov = (size % 8 == 0); #endif if (willUseOnlySimdMov) { src->SetContained(); } } } #ifdef TARGET_AMD64 else if (size >= REGSIZE_BYTES) { fill *= 0x0101010101010101LL; src->gtType = TYP_LONG; } #endif else { fill *= 0x01010101; } src->AsIntCon()->SetIconValue(fill); ContainBlockStoreAddress(blkNode, size, dstAddr); } } else { #ifdef TARGET_AMD64 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper; #else // TODO-X86-CQ: Investigate whether a helper call would be beneficial on x86 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindRepInstr; #endif } } else { assert(src->OperIs(GT_IND, GT_LCL_VAR, GT_LCL_FLD)); src->SetContained(); if (src->OperIs(GT_IND)) { // TODO-Cleanup: Make sure that GT_IND lowering didn't mark the source address as contained. // Sometimes the GT_IND type is a non-struct type and then GT_IND lowering may contain the // address, not knowing that GT_IND is part of a block op that has containment restrictions. src->AsIndir()->Addr()->ClearContained(); } else if (src->OperIs(GT_LCL_VAR)) { // TODO-1stClassStructs: for now we can't work with STORE_BLOCK source in register. const unsigned srcLclNum = src->AsLclVar()->GetLclNum(); comp->lvaSetVarDoNotEnregister(srcLclNum DEBUGARG(DoNotEnregisterReason::StoreBlkSrc)); } if (blkNode->OperIs(GT_STORE_OBJ)) { if (!blkNode->AsObj()->GetLayout()->HasGCPtr()) { blkNode->SetOper(GT_STORE_BLK); } #ifndef JIT32_GCENCODER else if (dstAddr->OperIsLocalAddr() && (size <= CPBLK_UNROLL_LIMIT)) { // If the size is small enough to unroll then we need to mark the block as non-interruptible // to actually allow unrolling. The generated code does not report GC references loaded in the // temporary register(s) used for copying. // This is not supported for the JIT32_GCENCODER. blkNode->SetOper(GT_STORE_BLK); blkNode->gtBlkOpGcUnsafe = true; } #endif } if (blkNode->OperIs(GT_STORE_OBJ)) { assert((dstAddr->TypeGet() == TYP_BYREF) || (dstAddr->TypeGet() == TYP_I_IMPL)); // If we have a long enough sequence of slots that do not require write barriers then // we can use REP MOVSD/Q instead of a sequence of MOVSD/Q instructions. According to the // Intel Manual, the sweet spot for small structs is between 4 to 12 slots of size where // the entire operation takes 20 cycles and encodes in 5 bytes (loading RCX and REP MOVSD/Q). unsigned nonGCSlots = 0; if (dstAddr->OperIsLocalAddr()) { // If the destination is on the stack then no write barriers are needed. nonGCSlots = blkNode->GetLayout()->GetSlotCount(); } else { // Otherwise a write barrier is needed for every GC pointer in the layout // so we need to check if there's a long enough sequence of non-GC slots. 
ClassLayout* layout = blkNode->GetLayout(); unsigned slots = layout->GetSlotCount(); for (unsigned i = 0; i < slots; i++) { if (layout->IsGCPtr(i)) { nonGCSlots = 0; } else { nonGCSlots++; if (nonGCSlots >= CPOBJ_NONGC_SLOTS_LIMIT) { break; } } } } if (nonGCSlots >= CPOBJ_NONGC_SLOTS_LIMIT) { blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindRepInstr; } else { blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll; } } else if (blkNode->OperIs(GT_STORE_BLK) && (size <= CPBLK_UNROLL_LIMIT)) { blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll; if (src->OperIs(GT_IND)) { ContainBlockStoreAddress(blkNode, size, src->AsIndir()->Addr()); } ContainBlockStoreAddress(blkNode, size, dstAddr); } else { assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK)); #ifdef TARGET_AMD64 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper; #else // TODO-X86-CQ: Investigate whether a helper call would be beneficial on x86 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindRepInstr; #endif } } } //------------------------------------------------------------------------ // ContainBlockStoreAddress: Attempt to contain an address used by an unrolled block store. // // Arguments: // blkNode - the block store node // size - the block size // addr - the address node to try to contain // void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr) { assert(blkNode->OperIs(GT_STORE_BLK) && (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll)); assert(size < INT32_MAX); if (addr->OperIsLocalAddr()) { addr->SetContained(); return; } if (!addr->OperIsAddrMode() && !TryCreateAddrMode(addr, true, blkNode)) { return; } GenTreeAddrMode* addrMode = addr->AsAddrMode(); // On x64 the address mode displacement is signed so it must not exceed INT32_MAX. This check is // an approximation since the last displacement we generate in an unrolled block operation can be // up to 16 bytes lower than offset + size. But offsets large enough to hit this case are likely // to be extremely rare for this to ever be a CQ issue. // On x86 this shouldn't be needed but then again, offsets large enough to hit this are rare. if (addrMode->Offset() > (INT32_MAX - static_cast<int>(size))) { return; } // Note that the parentNode is always the block node, even if we're dealing with the source address. // The source address is not directly used by the block node but by an IND node and that IND node is // always contained. if (!IsSafeToContainMem(blkNode, addrMode)) { return; } addrMode->SetContained(); } //------------------------------------------------------------------------ // LowerPutArgStk: Lower a GT_PUTARG_STK. // // Arguments: // tree - The node of interest // // Return Value: // None. // void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) { GenTree* src = putArgStk->gtGetOp1(); if (src->OperIs(GT_FIELD_LIST)) { #ifdef TARGET_X86 putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Invalid; GenTreeFieldList* fieldList = src->AsFieldList(); // The code generator will push these fields in reverse order by offset. Reorder the list here s.t. the order // of uses is visible to LSRA. assert(fieldList->Uses().IsSorted()); fieldList->Uses().Reverse(); // Now that the fields have been sorted, the kind of code we will generate. 
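        // The loop below walks the fields (now in descending offset order) and checks whether every
        // field is stored at a 4-byte slot boundary without sharing that slot with the previously
        // visited (higher offset) field; the answer selects between Kind::PushAllSlots and Kind::Push
        // further down.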
bool allFieldsAreSlots = true; unsigned prevOffset = putArgStk->GetStackByteSize(); for (GenTreeFieldList::Use& use : fieldList->Uses()) { GenTree* const fieldNode = use.GetNode(); const unsigned fieldOffset = use.GetOffset(); assert(!fieldNode->TypeIs(TYP_LONG)); // We can treat as a slot any field that is stored at a slot boundary, where the previous // field is not in the same slot. (Note that we store the fields in reverse order.) const bool fieldIsSlot = ((fieldOffset % 4) == 0) && ((prevOffset - fieldOffset) >= 4); if (!fieldIsSlot) { allFieldsAreSlots = false; } // For x86 we must mark all integral fields as contained or reg-optional, and handle them // accordingly in code generation, since we may have up to 8 fields, which cannot all be in // registers to be consumed atomically by the call. if (varTypeIsIntegralOrI(fieldNode)) { if (fieldNode->OperGet() == GT_LCL_VAR) { const LclVarDsc* varDsc = comp->lvaGetDesc(fieldNode->AsLclVarCommon()); if (!varDsc->lvDoNotEnregister) { fieldNode->SetRegOptional(); } else { MakeSrcContained(putArgStk, fieldNode); } } else if (fieldNode->IsIntCnsFitsInI32()) { MakeSrcContained(putArgStk, fieldNode); } else { // For the case where we cannot directly push the value, if we run out of registers, // it would be better to defer computation until we are pushing the arguments rather // than spilling, but this situation is not all that common, as most cases of promoted // structs do not have a large number of fields, and of those most are lclVars or // copy-propagated constants. fieldNode->SetRegOptional(); } } prevOffset = fieldOffset; } // Set the copy kind. // TODO-X86-CQ: Even if we are using push, if there are contiguous floating point fields, we should // adjust the stack once for those fields. The latter is really best done in code generation, but // this tuning should probably be undertaken as a whole. // Also, if there are floating point fields, it may be better to use the "Unroll" mode // of copying the struct as a whole, if the fields are not register candidates. if (allFieldsAreSlots) { putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::PushAllSlots; } else { putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Push; } #endif // TARGET_X86 return; } #ifdef FEATURE_PUT_STRUCT_ARG_STK if (src->TypeGet() != TYP_STRUCT) #endif // FEATURE_PUT_STRUCT_ARG_STK { // If the child of GT_PUTARG_STK is a constant, we don't need a register to // move it to memory (stack location). // // On AMD64, we don't want to make 0 contained, because we can generate smaller code // by zeroing a register and then storing it. E.g.: // xor rdx, rdx // mov gword ptr [rsp+28H], rdx // is 2 bytes smaller than: // mov gword ptr [rsp+28H], 0 // // On x86, we push stack arguments; we don't use 'mov'. So: // push 0 // is 1 byte smaller than: // xor rdx, rdx // push rdx if (IsContainableImmed(putArgStk, src) #if defined(TARGET_AMD64) && !src->IsIntegralConst(0) #endif // TARGET_AMD64 ) { MakeSrcContained(putArgStk, src); } return; } #ifdef FEATURE_PUT_STRUCT_ARG_STK GenTree* srcAddr = nullptr; bool haveLocalAddr = false; if ((src->OperGet() == GT_OBJ) || (src->OperGet() == GT_IND)) { srcAddr = src->AsOp()->gtOp1; assert(srcAddr != nullptr); haveLocalAddr = srcAddr->OperIsLocalAddr(); } else { assert(varTypeIsSIMD(putArgStk)); } ClassLayout* layout = src->AsObj()->GetLayout(); // In case of a CpBlk we could use a helper call. In case of putarg_stk we // can't do that since the helper call could kill some already set up outgoing args. 
// TODO-Amd64-Unix: converge the code for putarg_stk with cpyblk/cpyobj. // The cpyXXXX code is rather complex and this could cause it to be more complex, but // it might be the right thing to do. unsigned size = putArgStk->GetStackByteSize(); // TODO-X86-CQ: The helper call either is not supported on x86 or required more work // (I don't know which). if (!layout->HasGCPtr()) { #ifdef TARGET_X86 if (size < XMM_REGSIZE_BYTES) { // Codegen for "Kind::Push" will always load bytes in TARGET_POINTER_SIZE // chunks. As such, the correctness of this code depends on the fact that // morph will copy any "mis-sized" (too small) non-local OBJs into a temp, // thus preventing any possible out-of-bounds memory reads. assert(((layout->GetSize() % TARGET_POINTER_SIZE) == 0) || src->OperIsLocalRead() || (src->OperIsIndir() && src->AsIndir()->Addr()->IsLocalAddrExpr())); putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Push; } else #endif // TARGET_X86 if (size <= CPBLK_UNROLL_LIMIT) { putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Unroll; } else { putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::RepInstr; } } else // There are GC pointers. { #ifdef TARGET_X86 // On x86, we must use `push` to store GC references to the stack in order for the emitter to properly update // the function's GC info. These `putargstk` nodes will generate a sequence of `push` instructions. putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Push; #else // !TARGET_X86 putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::PartialRepInstr; #endif // !TARGET_X86 } // Always mark the OBJ and ADDR as contained trees by the putarg_stk. The codegen will deal with this tree. MakeSrcContained(putArgStk, src); if (haveLocalAddr) { // If the source address is the address of a lclVar, make the source address contained to avoid unnecessary // copies. // MakeSrcContained(putArgStk, srcAddr); } #endif // FEATURE_PUT_STRUCT_ARG_STK } /* Lower GT_CAST(srcType, DstType) nodes. * * Casts from small int type to float/double are transformed as follows: * GT_CAST(byte, float/double) = GT_CAST(GT_CAST(byte, int32), float/double) * GT_CAST(sbyte, float/double) = GT_CAST(GT_CAST(sbyte, int32), float/double) * GT_CAST(int16, float/double) = GT_CAST(GT_CAST(int16, int32), float/double) * GT_CAST(uint16, float/double) = GT_CAST(GT_CAST(uint16, int32), float/double) * * SSE2 conversion instructions operate on signed integers. casts from Uint32/Uint64 * are morphed as follows by front-end and hence should not be seen here. * GT_CAST(uint32, float/double) = GT_CAST(GT_CAST(uint32, long), float/double) * GT_CAST(uint64, float) = GT_CAST(GT_CAST(uint64, double), float) * * * Similarly casts from float/double to a smaller int type are transformed as follows: * GT_CAST(float/double, byte) = GT_CAST(GT_CAST(float/double, int32), byte) * GT_CAST(float/double, sbyte) = GT_CAST(GT_CAST(float/double, int32), sbyte) * GT_CAST(float/double, int16) = GT_CAST(GT_CAST(double/double, int32), int16) * GT_CAST(float/double, uint16) = GT_CAST(GT_CAST(double/double, int32), uint16) * * SSE2 has instructions to convert a float/double vlaue into a signed 32/64-bit * integer. The above transformations help us to leverage those instructions. * * Note that for the following conversions we still depend on helper calls and * don't expect to see them here. * i) GT_CAST(float/double, uint64) * ii) GT_CAST(float/double, int type with overflow detection) * * TODO-XArch-CQ: (Low-pri): Jit64 generates in-line code of 8 instructions for (i) above. 
* There are hardly any occurrences of this conversion operation in platform * assemblies or in CQ perf benchmarks (1 occurrence in corelib, microsoft.jscript, * 1 occurrence in Roslyn and no occurrences in system, system.core, system.numerics * system.windows.forms, scimark, fractals, bio mums). If we ever find evidence that * doing this optimization is a win, should consider generating in-lined code. */ void Lowering::LowerCast(GenTree* tree) { assert(tree->OperGet() == GT_CAST); GenTree* castOp = tree->AsCast()->CastOp(); var_types castToType = tree->CastToType(); var_types srcType = castOp->TypeGet(); var_types tmpType = TYP_UNDEF; // force the srcType to unsigned if GT_UNSIGNED flag is set if (tree->gtFlags & GTF_UNSIGNED) { srcType = varTypeToUnsigned(srcType); } // We should never see the following casts as they are expected to be lowered // apropriately or converted into helper calls by front-end. // srcType = float/double castToType = * and overflow detecting cast // Reason: must be converted to a helper call // srcType = float/double, castToType = ulong // Reason: must be converted to a helper call // srcType = uint castToType = float/double // Reason: uint -> float/double = uint -> long -> float/double // srcType = ulong castToType = float // Reason: ulong -> float = ulong -> double -> float if (varTypeIsFloating(srcType)) { noway_assert(!tree->gtOverflow()); noway_assert(castToType != TYP_ULONG); } else if (srcType == TYP_UINT) { noway_assert(!varTypeIsFloating(castToType)); } else if (srcType == TYP_ULONG) { noway_assert(castToType != TYP_FLOAT); } // Case of src is a small type and dst is a floating point type. if (varTypeIsSmall(srcType) && varTypeIsFloating(castToType)) { // These conversions can never be overflow detecting ones. noway_assert(!tree->gtOverflow()); tmpType = TYP_INT; } // case of src is a floating point type and dst is a small type. else if (varTypeIsFloating(srcType) && varTypeIsSmall(castToType)) { tmpType = TYP_INT; } if (tmpType != TYP_UNDEF) { GenTree* tmp = comp->gtNewCastNode(tmpType, castOp, tree->IsUnsigned(), tmpType); tmp->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->gtFlags &= ~GTF_UNSIGNED; tree->AsOp()->gtOp1 = tmp; BlockRange().InsertAfter(castOp, tmp); ContainCheckCast(tmp->AsCast()); } // Now determine if we have operands that should be contained. ContainCheckCast(tree->AsCast()); } #ifdef FEATURE_SIMD //---------------------------------------------------------------------------------------------- // Lowering::LowerSIMD: Perform containment analysis for a SIMD intrinsic node. // // Arguments: // simdNode - The SIMD intrinsic node. 
// void Lowering::LowerSIMD(GenTreeSIMD* simdNode) { if (simdNode->TypeGet() == TYP_SIMD12) { // GT_SIMD node requiring to produce TYP_SIMD12 in fact // produces a TYP_SIMD16 result simdNode->gtType = TYP_SIMD16; } if (simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicInitN) { assert(simdNode->GetSimdBaseType() == TYP_FLOAT); size_t argCount = simdNode->GetOperandCount(); size_t constArgCount = 0; float constArgValues[4]{0, 0, 0, 0}; for (GenTree* arg : simdNode->Operands()) { assert(arg->TypeIs(simdNode->GetSimdBaseType())); if (arg->IsCnsFltOrDbl()) { constArgValues[constArgCount] = static_cast<float>(arg->AsDblCon()->gtDconVal); constArgCount++; } } if (constArgCount == argCount) { for (GenTree* arg : simdNode->Operands()) { BlockRange().Remove(arg); } assert(sizeof(constArgValues) == 16); unsigned cnsSize = sizeof(constArgValues); unsigned cnsAlign = (comp->compCodeOpt() != Compiler::SMALL_CODE) ? cnsSize : 1; CORINFO_FIELD_HANDLE hnd = comp->GetEmitter()->emitBlkConst(constArgValues, cnsSize, cnsAlign, simdNode->GetSimdBaseType()); GenTree* clsVarAddr = new (comp, GT_CLS_VAR_ADDR) GenTreeClsVar(GT_CLS_VAR_ADDR, TYP_I_IMPL, hnd, nullptr); BlockRange().InsertBefore(simdNode, clsVarAddr); simdNode->ChangeOper(GT_IND); simdNode->AsOp()->gtOp1 = clsVarAddr; ContainCheckIndir(simdNode->AsIndir()); return; } } ContainCheckSIMD(simdNode); } #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS //---------------------------------------------------------------------------------------------- // LowerHWIntrinsicCC: Lowers a hardware intrinsic node that produces a boolean value by // setting the condition flags. // // Arguments: // node - The hardware intrinsic node // newIntrinsicId - The intrinsic id of the lowered intrinsic node // condition - The condition code of the generated SETCC/JCC node // void Lowering::LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition) { GenTreeCC* cc = LowerNodeCC(node, condition); assert(HWIntrinsicInfo::lookupNumArgs(newIntrinsicId) == 2); node->ChangeHWIntrinsicId(newIntrinsicId); node->gtType = TYP_VOID; node->ClearUnusedValue(); bool swapOperands = false; bool canSwapOperands = false; switch (newIntrinsicId) { case NI_SSE_COMISS: case NI_SSE_UCOMISS: case NI_SSE2_COMISD: case NI_SSE2_UCOMISD: // In some cases we can generate better code if we swap the operands: // - If the condition is not one of the "preferred" floating point conditions we can swap // the operands and change the condition to avoid generating an extra JP/JNP branch. // - If the first operand can be contained but the second cannot, we can swap operands in // order to be able to contain the first operand and avoid the need for a temp reg. // We can't handle both situations at the same time and since an extra branch is likely to // be worse than an extra temp reg (x64 has a reasonable number of XMM registers) we'll favor // the branch case: // - If the condition is not preferred then swap, even if doing this will later prevent // containment. // - Allow swapping for containment purposes only if this doesn't result in a non-"preferred" // condition being generated. if ((cc != nullptr) && cc->gtCondition.PreferSwap()) { swapOperands = true; } else { canSwapOperands = (cc == nullptr) || !GenCondition::Swap(cc->gtCondition).PreferSwap(); } break; case NI_SSE41_PTEST: case NI_AVX_PTEST: // If we need the Carry flag then we can't swap operands. 
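            // (ptest computes ZF from the AND of its operands, which is symmetric, but CF from an
            // and-not, which is order dependent, so only the ZF-based EQ/NE conditions are safe to swap.)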
canSwapOperands = (cc == nullptr) || cc->gtCondition.Is(GenCondition::EQ, GenCondition::NE); break; default: unreached(); } if (canSwapOperands) { bool op1SupportsRegOptional = false; bool op2SupportsRegOptional = false; if (!TryGetContainableHWIntrinsicOp(node, &node->Op(2), &op2SupportsRegOptional) && TryGetContainableHWIntrinsicOp(node, &node->Op(1), &op1SupportsRegOptional)) { // Swap operands if op2 cannot be contained but op1 can. swapOperands = true; } } if (swapOperands) { std::swap(node->Op(1), node->Op(2)); if (cc != nullptr) { cc->gtCondition = GenCondition::Swap(cc->gtCondition); } } } //---------------------------------------------------------------------------------------------- // LowerFusedMultiplyAdd: Changes NI_FMA_MultiplyAddScalar produced by Math(F).FusedMultiplyAdd // to a better FMA intrinsics if there are GT_NEG around in order to eliminate them. // // Arguments: // node - The hardware intrinsic node // // Notes: // Math(F).FusedMultiplyAdd is expanded into NI_FMA_MultiplyAddScalar and // depending on additional GT_NEG nodes around it can be: // // x * y + z -> NI_FMA_MultiplyAddScalar // x * -y + z -> NI_FMA_MultiplyAddNegatedScalar // -x * y + z -> NI_FMA_MultiplyAddNegatedScalar // -x * -y + z -> NI_FMA_MultiplyAddScalar // x * y - z -> NI_FMA_MultiplySubtractScalar // x * -y - z -> NI_FMA_MultiplySubtractNegatedScalar // -x * y - z -> NI_FMA_MultiplySubtractNegatedScalar // -x * -y - z -> NI_FMA_MultiplySubtractScalar // void Lowering::LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node) { assert(node->GetHWIntrinsicId() == NI_FMA_MultiplyAddScalar); GenTreeHWIntrinsic* createScalarOps[3]; for (size_t i = 1; i <= 3; i++) { GenTree* arg = node->Op(i); if (!arg->OperIsHWIntrinsic() || (arg->AsHWIntrinsic()->GetHWIntrinsicId() != NI_Vector128_CreateScalarUnsafe)) { return; } createScalarOps[i - 1] = arg->AsHWIntrinsic(); } GenTree* argX = createScalarOps[0]->Op(1); GenTree* argY = createScalarOps[1]->Op(1); GenTree* argZ = createScalarOps[2]->Op(1); const bool negMul = argX->OperIs(GT_NEG) != argY->OperIs(GT_NEG); if (argX->OperIs(GT_NEG)) { createScalarOps[0]->Op(1) = argX->gtGetOp1(); BlockRange().Remove(argX); } if (argY->OperIs(GT_NEG)) { createScalarOps[1]->Op(1) = argY->gtGetOp1(); BlockRange().Remove(argY); } if (argZ->OperIs(GT_NEG)) { createScalarOps[2]->Op(1) = argZ->gtGetOp1(); BlockRange().Remove(argZ); node->ChangeHWIntrinsicId(negMul ? NI_FMA_MultiplySubtractNegatedScalar : NI_FMA_MultiplySubtractScalar); } else { node->ChangeHWIntrinsicId(negMul ? NI_FMA_MultiplyAddNegatedScalar : NI_FMA_MultiplyAddScalar); } } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsic: Perform containment analysis for a hardware intrinsic node. // // Arguments: // node - The hardware intrinsic node. // void Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node) { if (node->TypeGet() == TYP_SIMD12) { // GT_HWINTRINSIC node requiring to produce TYP_SIMD12 in fact // produces a TYP_SIMD16 result node->gtType = TYP_SIMD16; } NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); switch (intrinsicId) { case NI_Vector128_Create: case NI_Vector256_Create: { // We don't directly support the Vector128.Create or Vector256.Create methods in codegen // and instead lower them to other intrinsic nodes in LowerHWIntrinsicCreate so we expect // that the node is modified to either not be a HWIntrinsic node or that it is no longer // the same intrinsic as when it came in. 
In the case of Vector256.Create, we may lower // it into 2x Vector128.Create intrinsics which themselves are also lowered into other // intrinsics that are not Vector*.Create LowerHWIntrinsicCreate(node); assert(!node->OperIsHWIntrinsic() || (node->GetHWIntrinsicId() != intrinsicId)); LowerNode(node); return; } case NI_Vector128_Dot: case NI_Vector256_Dot: { LowerHWIntrinsicDot(node); return; } case NI_Vector128_GetElement: case NI_Vector256_GetElement: { LowerHWIntrinsicGetElement(node); if ((node->GetHWIntrinsicId() == NI_Vector128_GetElement) || (node->GetHWIntrinsicId() == NI_Vector256_GetElement)) { // Most NI_Vector*_GetElement intrinsics are lowered to // alternative nodes, such as the Extract intrinsics, // which are themselves lowered. // // However, certain types may not have a direct equivalent // in which case we specially handle them directly as GetElement // and want to do the relevant containment checks. break; } return; } case NI_Vector128_WithElement: case NI_Vector256_WithElement: { LowerHWIntrinsicWithElement(node); return; } case NI_Vector128_op_Equality: case NI_Vector256_op_Equality: { LowerHWIntrinsicCmpOp(node, GT_EQ); return; } case NI_Vector128_op_Inequality: case NI_Vector256_op_Inequality: { LowerHWIntrinsicCmpOp(node, GT_NE); return; } case NI_Vector128_ToScalar: case NI_Vector256_ToScalar: { LowerHWIntrinsicToScalar(node); break; } case NI_SSE41_Extract: { if (varTypeIsFloating(node->GetSimdBaseType())) { assert(node->GetSimdBaseType() == TYP_FLOAT); assert(node->GetSimdSize() == 16); GenTree* op2 = node->Op(2); if (!op2->OperIsConst()) { // Extract allows the full range while GetElement only allows // 0-3, so we need to mask the index here so codegen works. GenTree* msk = comp->gtNewIconNode(3, TYP_INT); BlockRange().InsertAfter(op2, msk); GenTree* tmp = comp->gtNewOperNode(GT_AND, TYP_INT, op2, msk); BlockRange().InsertAfter(msk, tmp); LowerNode(tmp); node->Op(2) = tmp; } node->ChangeHWIntrinsicId(NI_Vector128_GetElement); LowerNode(node); } break; } case NI_SSE2_Insert: case NI_SSE41_Insert: case NI_SSE41_X64_Insert: { assert(node->GetOperandCount() == 3); // Insert takes either a 32-bit register or a memory operand. // In either case, only SimdBaseType bits are read and so // widening or narrowing the operand may be unnecessary and it // can just be used directly. node->Op(2) = TryRemoveCastIfPresent(node->GetSimdBaseType(), node->Op(2)); break; } case NI_SSE42_Crc32: { assert(node->GetOperandCount() == 2); // Crc32 takes either a bit register or a memory operand. // In either case, only gtType bits are read and so widening // or narrowing the operand may be unnecessary and it can // just be used directly. 
node->Op(2) = TryRemoveCastIfPresent(node->TypeGet(), node->Op(2)); break; } case NI_SSE2_CompareGreaterThan: { if (node->GetSimdBaseType() != TYP_DOUBLE) { assert(varTypeIsIntegral(node->GetSimdBaseType())); break; } FALLTHROUGH; } case NI_SSE_CompareGreaterThan: case NI_SSE_CompareGreaterThanOrEqual: case NI_SSE_CompareNotGreaterThan: case NI_SSE_CompareNotGreaterThanOrEqual: case NI_SSE2_CompareGreaterThanOrEqual: case NI_SSE2_CompareNotGreaterThan: case NI_SSE2_CompareNotGreaterThanOrEqual: { assert((node->GetSimdBaseType() == TYP_FLOAT) || (node->GetSimdBaseType() == TYP_DOUBLE)); if (comp->compOpportunisticallyDependsOn(InstructionSet_AVX)) { break; } // pre-AVX doesn't actually support these intrinsics in hardware so we need to swap the operands around std::swap(node->Op(1), node->Op(2)); break; } case NI_SSE2_CompareLessThan: case NI_SSE42_CompareLessThan: case NI_AVX2_CompareLessThan: { if (node->GetSimdBaseType() == TYP_DOUBLE) { break; } assert(varTypeIsIntegral(node->GetSimdBaseType())); // this isn't actually supported in hardware so we need to swap the operands around std::swap(node->Op(1), node->Op(2)); break; } case NI_SSE_CompareScalarOrderedEqual: LowerHWIntrinsicCC(node, NI_SSE_COMISS, GenCondition::FEQ); break; case NI_SSE_CompareScalarOrderedNotEqual: LowerHWIntrinsicCC(node, NI_SSE_COMISS, GenCondition::FNEU); break; case NI_SSE_CompareScalarOrderedLessThan: LowerHWIntrinsicCC(node, NI_SSE_COMISS, GenCondition::FLT); break; case NI_SSE_CompareScalarOrderedLessThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE_COMISS, GenCondition::FLE); break; case NI_SSE_CompareScalarOrderedGreaterThan: LowerHWIntrinsicCC(node, NI_SSE_COMISS, GenCondition::FGT); break; case NI_SSE_CompareScalarOrderedGreaterThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE_COMISS, GenCondition::FGE); break; case NI_SSE_CompareScalarUnorderedEqual: LowerHWIntrinsicCC(node, NI_SSE_UCOMISS, GenCondition::FEQ); break; case NI_SSE_CompareScalarUnorderedNotEqual: LowerHWIntrinsicCC(node, NI_SSE_UCOMISS, GenCondition::FNEU); break; case NI_SSE_CompareScalarUnorderedLessThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE_UCOMISS, GenCondition::FLE); break; case NI_SSE_CompareScalarUnorderedLessThan: LowerHWIntrinsicCC(node, NI_SSE_UCOMISS, GenCondition::FLT); break; case NI_SSE_CompareScalarUnorderedGreaterThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE_UCOMISS, GenCondition::FGE); break; case NI_SSE_CompareScalarUnorderedGreaterThan: LowerHWIntrinsicCC(node, NI_SSE_UCOMISS, GenCondition::FGT); break; case NI_SSE2_CompareScalarOrderedEqual: LowerHWIntrinsicCC(node, NI_SSE2_COMISD, GenCondition::FEQ); break; case NI_SSE2_CompareScalarOrderedNotEqual: LowerHWIntrinsicCC(node, NI_SSE2_COMISD, GenCondition::FNEU); break; case NI_SSE2_CompareScalarOrderedLessThan: LowerHWIntrinsicCC(node, NI_SSE2_COMISD, GenCondition::FLT); break; case NI_SSE2_CompareScalarOrderedLessThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE2_COMISD, GenCondition::FLE); break; case NI_SSE2_CompareScalarOrderedGreaterThan: LowerHWIntrinsicCC(node, NI_SSE2_COMISD, GenCondition::FGT); break; case NI_SSE2_CompareScalarOrderedGreaterThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE2_COMISD, GenCondition::FGE); break; case NI_SSE2_CompareScalarUnorderedEqual: LowerHWIntrinsicCC(node, NI_SSE2_UCOMISD, GenCondition::FEQ); break; case NI_SSE2_CompareScalarUnorderedNotEqual: LowerHWIntrinsicCC(node, NI_SSE2_UCOMISD, GenCondition::FNEU); break; case NI_SSE2_CompareScalarUnorderedLessThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE2_UCOMISD, GenCondition::FLE); break; case 
NI_SSE2_CompareScalarUnorderedLessThan: LowerHWIntrinsicCC(node, NI_SSE2_UCOMISD, GenCondition::FLT); break; case NI_SSE2_CompareScalarUnorderedGreaterThanOrEqual: LowerHWIntrinsicCC(node, NI_SSE2_UCOMISD, GenCondition::FGE); break; case NI_SSE2_CompareScalarUnorderedGreaterThan: LowerHWIntrinsicCC(node, NI_SSE2_UCOMISD, GenCondition::FGT); break; case NI_SSE41_TestC: LowerHWIntrinsicCC(node, NI_SSE41_PTEST, GenCondition::C); break; case NI_SSE41_TestZ: LowerHWIntrinsicCC(node, NI_SSE41_PTEST, GenCondition::EQ); break; case NI_SSE41_TestNotZAndNotC: LowerHWIntrinsicCC(node, NI_SSE41_PTEST, GenCondition::UGT); break; case NI_AVX_TestC: LowerHWIntrinsicCC(node, NI_AVX_PTEST, GenCondition::C); break; case NI_AVX_TestZ: LowerHWIntrinsicCC(node, NI_AVX_PTEST, GenCondition::EQ); break; case NI_AVX_TestNotZAndNotC: LowerHWIntrinsicCC(node, NI_AVX_PTEST, GenCondition::UGT); break; case NI_FMA_MultiplyAddScalar: LowerFusedMultiplyAdd(node); break; default: break; } ContainCheckHWIntrinsic(node); } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsicCmpOp: Lowers a Vector128 or Vector256 comparison intrinsic // // Arguments: // node - The hardware intrinsic node. // cmpOp - The comparison operation, currently must be GT_EQ or GT_NE // void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); var_types simdType = Compiler::getSIMDTypeForSize(simdSize); assert((intrinsicId == NI_Vector128_op_Equality) || (intrinsicId == NI_Vector128_op_Inequality) || (intrinsicId == NI_Vector256_op_Equality) || (intrinsicId == NI_Vector256_op_Inequality)); assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); assert(node->gtType == TYP_BOOL); assert((cmpOp == GT_EQ) || (cmpOp == GT_NE)); // We have the following (with the appropriate simd size and where the intrinsic could be op_Inequality): // /--* op2 simd // /--* op1 simd // node = * HWINTRINSIC simd T op_Equality GenTree* op1 = node->Op(1); GenTree* op2 = node->Op(2); GenCondition cmpCnd = (cmpOp == GT_EQ) ? GenCondition::EQ : GenCondition::NE; if (op2->IsIntegralConstVector(0) && comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { // On SSE4.1 or higher we can optimize comparisons against zero to // just use PTEST. We can't support it for floating-point, however, // as it has both +0.0 and -0.0 where +0.0 == -0.0 node->Op(1) = op1; BlockRange().Remove(op2); if (op2->AsMultiOp()->GetOperandCount() == 1) { // Some zero vectors are Create/Initialization nodes with a constant zero operand // We should also remove this to avoid dead code assert(op2->AsMultiOp()->Op(1)->IsIntegralConst(0)); BlockRange().Remove(op2->AsMultiOp()->Op(1)); } LIR::Use op1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(op1Use); op1 = node->Op(1); op2 = comp->gtClone(op1); BlockRange().InsertAfter(op1, op2); node->Op(2) = op2; if (simdSize == 32) { // TODO-Review: LowerHWIntrinsicCC resets the id again, so why is this needed? node->ChangeHWIntrinsicId(NI_AVX_TestZ); LowerHWIntrinsicCC(node, NI_AVX_PTEST, cmpCnd); } else { // TODO-Review: LowerHWIntrinsicCC resets the id again, so why is this needed? 
node->ChangeHWIntrinsicId(NI_SSE41_TestZ); LowerHWIntrinsicCC(node, NI_SSE41_PTEST, cmpCnd); } return; } NamedIntrinsic cmpIntrinsic; CorInfoType cmpJitType; NamedIntrinsic mskIntrinsic; CorInfoType mskJitType; int mskConstant; switch (simdBaseType) { case TYP_BYTE: case TYP_UBYTE: case TYP_SHORT: case TYP_USHORT: case TYP_INT: case TYP_UINT: { cmpJitType = simdBaseJitType; mskJitType = CORINFO_TYPE_UBYTE; if (simdSize == 32) { cmpIntrinsic = NI_AVX2_CompareEqual; mskIntrinsic = NI_AVX2_MoveMask; mskConstant = -1; } else { assert(simdSize == 16); cmpIntrinsic = NI_SSE2_CompareEqual; mskIntrinsic = NI_SSE2_MoveMask; mskConstant = 0xFFFF; } break; } case TYP_LONG: case TYP_ULONG: { mskJitType = CORINFO_TYPE_UBYTE; if (simdSize == 32) { cmpIntrinsic = NI_AVX2_CompareEqual; cmpJitType = simdBaseJitType; mskIntrinsic = NI_AVX2_MoveMask; mskConstant = -1; } else { assert(simdSize == 16); if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { cmpIntrinsic = NI_SSE41_CompareEqual; cmpJitType = simdBaseJitType; } else { cmpIntrinsic = NI_SSE2_CompareEqual; cmpJitType = CORINFO_TYPE_UINT; } mskIntrinsic = NI_SSE2_MoveMask; mskConstant = 0xFFFF; } break; } case TYP_FLOAT: { cmpJitType = simdBaseJitType; mskJitType = simdBaseJitType; if (simdSize == 32) { cmpIntrinsic = NI_AVX_CompareEqual; mskIntrinsic = NI_AVX_MoveMask; mskConstant = 0xFF; } else { cmpIntrinsic = NI_SSE_CompareEqual; mskIntrinsic = NI_SSE_MoveMask; if (simdSize == 16) { mskConstant = 0xF; } else if (simdSize == 12) { mskConstant = 0x7; } else { assert(simdSize == 8); mskConstant = 0x3; } } break; } case TYP_DOUBLE: { cmpJitType = simdBaseJitType; mskJitType = simdBaseJitType; if (simdSize == 32) { cmpIntrinsic = NI_AVX_CompareEqual; mskIntrinsic = NI_AVX_MoveMask; mskConstant = 0xF; } else { assert(simdSize == 16); cmpIntrinsic = NI_SSE2_CompareEqual; mskIntrinsic = NI_SSE2_MoveMask; mskConstant = 0x3; } break; } default: { unreached(); } } GenTree* cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, cmpIntrinsic, cmpJitType, simdSize); BlockRange().InsertBefore(node, cmp); LowerNode(cmp); GenTree* msk = comp->gtNewSimdHWIntrinsicNode(TYP_INT, cmp, mskIntrinsic, mskJitType, simdSize); BlockRange().InsertAfter(cmp, msk); LowerNode(msk); GenTree* mskCns = comp->gtNewIconNode(mskConstant, TYP_INT); BlockRange().InsertAfter(msk, mskCns); if ((simdBaseType == TYP_FLOAT) && (simdSize < 16)) { // For TYP_SIMD8 and TYP_SIMD12 we need to clear the upper bits and can't assume their value GenTree* tmp = comp->gtNewOperNode(GT_AND, TYP_INT, msk, mskCns); BlockRange().InsertAfter(mskCns, tmp); LowerNode(tmp); msk = tmp; mskCns = comp->gtNewIconNode(mskConstant, TYP_INT); BlockRange().InsertAfter(msk, mskCns); } node->ChangeOper(cmpOp); node->ChangeType(TYP_INT); node->AsOp()->gtOp1 = msk; node->AsOp()->gtOp2 = mskCns; GenTree* cc = LowerNodeCC(node, cmpCnd); node->gtType = TYP_VOID; node->ClearUnusedValue(); LowerNode(node); } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsicCreate: Lowers a Vector128 or Vector256 Create call // // Arguments: // node - The hardware intrinsic node. 
// void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); var_types simdType = node->gtType; CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); VectorConstant vecCns = {}; if ((simdSize == 8) && (simdType == TYP_DOUBLE)) { // TODO-Cleanup: Struct retyping means we have the wrong type here. We need to // manually fix it up so the simdType checks below are correct. simdType = TYP_SIMD8; } assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); GenTree* op1 = node->Op(1); // Spare GenTrees to be used for the lowering logic below // Defined upfront to avoid naming conflicts, etc... GenTree* idx = nullptr; GenTree* tmp1 = nullptr; GenTree* tmp2 = nullptr; GenTree* tmp3 = nullptr; size_t argCnt = node->GetOperandCount(); size_t cnsArgCnt = 0; // These intrinsics are meant to set the same value to every element. if ((argCnt == 1) && HandleArgForHWIntrinsicCreate(node->Op(1), 0, vecCns, simdBaseType)) { // Now assign the rest of the arguments. for (unsigned i = 1; i < simdSize / genTypeSize(simdBaseType); i++) { HandleArgForHWIntrinsicCreate(node->Op(1), i, vecCns, simdBaseType); } cnsArgCnt = 1; } else { for (unsigned i = 1; i <= argCnt; i++) { if (HandleArgForHWIntrinsicCreate(node->Op(i), i - 1, vecCns, simdBaseType)) { cnsArgCnt++; } } } assert((argCnt == 1) || (argCnt == (simdSize / genTypeSize(simdBaseType)))); if (argCnt == cnsArgCnt) { for (GenTree* arg : node->Operands()) { #if !defined(TARGET_64BIT) if (arg->OperIsLong()) { BlockRange().Remove(arg->AsOp()->gtGetOp1()); BlockRange().Remove(arg->AsOp()->gtGetOp2()); } #endif // !TARGET_64BIT BlockRange().Remove(arg); } assert((simdSize == 8) || (simdSize == 12) || (simdSize == 16) || (simdSize == 32)); if (((simdSize == 16) || (simdSize == 32)) && VectorConstantIsBroadcastedI64(vecCns, simdSize / 8)) { // If we are a single constant or if all parts are the same, we might be able to optimize // this even further for certain values, such as Zero or AllBitsSet. if (vecCns.i64[0] == 0) { node->ResetHWIntrinsicId((simdSize == 16) ? NI_Vector128_get_Zero : NI_Vector256_get_Zero); return; } else if (vecCns.i64[0] == -1) { node->ResetHWIntrinsicId((simdSize == 16) ? NI_Vector128_get_AllBitsSet : NI_Vector256_get_AllBitsSet); return; } } unsigned cnsSize = (simdSize != 12) ? simdSize : 16; unsigned cnsAlign = (comp->compCodeOpt() != Compiler::SMALL_CODE) ? cnsSize : emitter::dataSection::MIN_DATA_ALIGN; var_types dataType = Compiler::getSIMDTypeForSize(simdSize); UNATIVE_OFFSET cnum = comp->GetEmitter()->emitDataConst(&vecCns, cnsSize, cnsAlign, dataType); CORINFO_FIELD_HANDLE hnd = comp->eeFindJitDataOffs(cnum); GenTree* clsVarAddr = new (comp, GT_CLS_VAR_ADDR) GenTreeClsVar(GT_CLS_VAR_ADDR, TYP_I_IMPL, hnd, nullptr); BlockRange().InsertBefore(node, clsVarAddr); node->ChangeOper(GT_IND); node->AsOp()->gtOp1 = clsVarAddr; // TODO-XARCH-CQ: We should be able to modify at least the paths that use Insert to trivially support partial // vector constants. With this, we can create a constant if say 50% of the inputs are also constant and just // insert the non-constant values which should still allow some gains. 
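        // At this point the fully constant Create has been folded into a GT_IND that loads the vector
        // from the data constant emitted above, so no further lowering is needed here.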
return; } else if (argCnt == 1) { // We have the following (where simd is simd16 or simd32): // /--* op1 T // node = * HWINTRINSIC simd T Create if (intrinsicId == NI_Vector256_Create) { if (comp->compOpportunisticallyDependsOn(InstructionSet_AVX2)) { // We will be constructing the following parts: // /--* op1 T // tmp1 = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* tmp1 simd16 // node = * HWINTRINSIC simd32 T BroadcastScalarToVector256 // This is roughly the following managed code: // var tmp1 = Vector128.CreateScalarUnsafe(op1); // return Avx2.BroadcastScalarToVector256(tmp1); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); node->ResetHWIntrinsicId(NI_AVX2_BroadcastScalarToVector256, tmp1); return; } assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX)); // We will be constructing the following parts: // /--* op1 T // tmp1 = * HWINTRINSIC simd16 T Create // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // /--* tmp2 simd16 // tmp3 = * HWINTRINSIC simd16 T ToVector256Unsafe // idx = CNS_INT int 0 // /--* tmp3 simd32 // +--* tmp1 simd16 // +--* idx int // node = * HWINTRINSIC simd32 T InsertVector128 // This is roughly the following managed code: // var tmp1 = Vector128.Create(op1); // var tmp2 = tmp1; // var tmp3 = tmp2.ToVector256Unsafe(); // return Avx.InsertVector128(tmp3, tmp1, 0x01); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_Create, simdBaseJitType, 16); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); tmp3 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD32, tmp2, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16); BlockRange().InsertAfter(tmp2, tmp3); LowerNode(tmp3); idx = comp->gtNewIconNode(0x01, TYP_INT); BlockRange().InsertAfter(tmp3, idx); node->ResetHWIntrinsicId(NI_AVX_InsertVector128, comp, tmp3, tmp1, idx); return; } // We will be constructing the following parts: // /--* op1 T // tmp1 = * HWINTRINSIC simd16 T CreateScalarUnsafe // ... // This is roughly the following managed code: // var tmp1 = Vector128.CreateScalarUnsafe(op1); // ... tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); if ((simdBaseJitType != CORINFO_TYPE_DOUBLE) && comp->compOpportunisticallyDependsOn(InstructionSet_AVX2)) { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // node = * HWINTRINSIC simd16 T BroadcastScalarToVector128 // This is roughly the following managed code: // ... // return Avx2.BroadcastScalarToVector128(tmp1); node->ChangeHWIntrinsicId(NI_AVX2_BroadcastScalarToVector128, tmp1); return; } switch (simdBaseType) { case TYP_BYTE: case TYP_UBYTE: { if (comp->compOpportunisticallyDependsOn(InstructionSet_SSSE3)) { // We will be constructing the following parts: // ... // tmp2 = HWINTRINSIC simd16 ubyte get_Zero // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 ubyte Shuffle // This is roughly the following managed code: // ... 
// var tmp2 = Vector128<byte>.Zero; // return Ssse3.Shuffle(tmp1, tmp2); tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, NI_Vector128_get_Zero, CORINFO_TYPE_UBYTE, simdSize); BlockRange().InsertAfter(tmp1, tmp2); LowerNode(tmp2); node->ResetHWIntrinsicId(NI_SSSE3_Shuffle, tmp1, tmp2); break; } assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp1 = * HWINTRINSIC simd16 ubyte UnpackLow // ... // This is roughly the following managed code: // ... // var tmp2 = tmp1; // tmp1 = Sse2.UnpackLow(tmp1, tmp2); // ... node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_SSE2_UnpackLow, CORINFO_TYPE_UBYTE, simdSize); BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); FALLTHROUGH; } case TYP_SHORT: case TYP_USHORT: { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp1 = * HWINTRINSIC simd16 ushort UnpackLow // ... // This is roughly the following managed code: // ... // var tmp2 = tmp1; // tmp1 = Sse2.UnpackLow(tmp1, tmp2); // ... assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_SSE2_UnpackLow, CORINFO_TYPE_USHORT, simdSize); BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); FALLTHROUGH; } case TYP_INT: case TYP_UINT: { // We will be constructing the following parts: // ... // idx = CNS_INT int 0 // /--* tmp1 simd16 // +--* idx int // node = * HWINTRINSIC simd16 uint Shuffle // This is roughly the following managed code: // ... // return Sse2.Shuffle(tmp1, 0x00); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); idx = comp->gtNewIconNode(0x00, TYP_INT); BlockRange().InsertAfter(tmp1, idx); node->ResetHWIntrinsicId(NI_SSE2_Shuffle, tmp1, idx); node->SetSimdBaseJitType(CORINFO_TYPE_UINT); break; } #if defined(TARGET_AMD64) case TYP_LONG: case TYP_ULONG: { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 ulong UnpackLow // This is roughly the following managed code: // ... // var tmp2 = tmp1; // return Sse2.UnpackLow(tmp1, tmp2); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); node->ResetHWIntrinsicId(NI_SSE2_UnpackLow, tmp1, tmp2); break; } #endif // TARGET_AMD64 case TYP_FLOAT: { if (comp->compOpportunisticallyDependsOn(InstructionSet_AVX)) { // We will be constructing the following parts: // ... // idx = CNS_INT int 0 // /--* tmp1 simd16 // +--* idx int // node = * HWINTRINSIC simd16 float Permute // This is roughly the following managed code: // ... 
// return Avx.Permute(tmp1, 0x00); idx = comp->gtNewIconNode(0x00, TYP_INT); BlockRange().InsertAfter(tmp1, idx); node->ResetHWIntrinsicId(NI_AVX_Permute, tmp1, idx); break; } // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // idx = CNS_INT int 0 // /--* tmp1 simd16 // +--* tmp2 simd16 // +--* idx int // node = * HWINTRINSIC simd16 float Shuffle // This is roughly the following managed code: // ... // var tmp2 = tmp1; // return Sse.Shuffle(tmp1, tmp2, 0x00); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE)); node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); idx = comp->gtNewIconNode(0x00, TYP_INT); BlockRange().InsertAfter(tmp2, idx); node->ResetHWIntrinsicId(NI_SSE_Shuffle, comp, tmp1, tmp2, idx); break; } case TYP_DOUBLE: { if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE3)) { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // node = * HWINTRINSIC simd16 double MoveAndDuplicate // This is roughly the following managed code: // ... // return Sse3.MoveAndDuplicate(tmp1); node->ChangeHWIntrinsicId(NI_SSE3_MoveAndDuplicate, tmp1); break; } assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 float MoveLowToHigh // This is roughly the following managed code: // ... // var tmp2 = tmp1; // return Sse.MoveLowToHigh(tmp1, tmp2); node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); node->ResetHWIntrinsicId(NI_SSE_MoveLowToHigh, tmp1, tmp2); node->SetSimdBaseJitType(CORINFO_TYPE_FLOAT); break; } default: { unreached(); } } return; } GenTree* op2 = node->Op(2); // We have the following (where simd is simd16 or simd32): // /--* op1 T // +--* ... T // +--* opN T // node = * HWINTRINSIC simd T Create if (intrinsicId == NI_Vector256_Create) { assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX)); // We will be constructing the following parts: // /--* op1 T // +--* ... T // lo = * HWINTRINSIC simd16 T Create // /--* ... T // +--* opN T // hi = * HWINTRINSIC simd16 T Create // idx = CNS_INT int 1 // /--* lo simd32 // +--* hi simd16 // +--* idx int // node = * HWINTRINSIC simd32 T InsertVector128 // This is roughly the following managed code: // ... // var lo = Vector128.Create(op1, ...); // var hi = Vector128.Create(..., opN); // return Avx.InsertVector128(lo, hi, 0x01); // Each Vector128.Create call gets half the operands. 
That is: // lo = Vector128.Create(op1, op2); // hi = Vector128.Create(op3, op4); // -or- // lo = Vector128.Create(op1, ..., op4); // hi = Vector128.Create(op5, ..., op8); // -or- // lo = Vector128.Create(op1, ..., op8); // hi = Vector128.Create(op9, ..., op16); // -or- // lo = Vector128.Create(op1, ..., op16); // hi = Vector128.Create(op17, ..., op32); size_t halfArgCnt = argCnt / 2; assert((halfArgCnt * 2) == argCnt); GenTree* lo = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, node->GetOperandArray(), halfArgCnt, NI_Vector128_Create, simdBaseJitType, 16); BlockRange().InsertAfter(node->Op(halfArgCnt), lo); LowerNode(lo); GenTree* hi = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, node->GetOperandArray(halfArgCnt), halfArgCnt, NI_Vector128_Create, simdBaseJitType, 16); BlockRange().InsertAfter(node->Op(argCnt), hi); LowerNode(hi); idx = comp->gtNewIconNode(0x01, TYP_INT); BlockRange().InsertAfter(hi, idx); assert(argCnt >= 3); node->ResetHWIntrinsicId(NI_AVX_InsertVector128, comp, lo, hi, idx); return; } // We will be constructing the following parts: // /--* op1 T // tmp1 = * HWINTRINSIC simd16 T CreateScalarUnsafe // ... // This is roughly the following managed code: // var tmp1 = Vector128.CreateScalarUnsafe(op1); // ... tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); switch (simdBaseType) { case TYP_BYTE: case TYP_UBYTE: case TYP_SHORT: case TYP_USHORT: case TYP_INT: case TYP_UINT: { unsigned N = 0; GenTree* opN = nullptr; NamedIntrinsic insIntrinsic = NI_Illegal; if ((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT)) { assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); insIntrinsic = NI_SSE2_Insert; } else if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { insIntrinsic = NI_SSE41_Insert; } if (insIntrinsic != NI_Illegal) { for (N = 1; N < argCnt - 1; N++) { // We will be constructing the following parts: // ... // idx = CNS_INT int N // /--* tmp1 simd16 // +--* opN T // +--* idx int // tmp1 = * HWINTRINSIC simd16 T Insert // ... // This is roughly the following managed code: // ... // tmp1 = Sse?.Insert(tmp1, opN, N); // ... opN = node->Op(N + 1); idx = comp->gtNewIconNode(N, TYP_INT); BlockRange().InsertAfter(opN, idx); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, opN, idx, insIntrinsic, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp1); LowerNode(tmp1); } assert(N == (argCnt - 1)); // We will be constructing the following parts: // idx = CNS_INT int N // /--* tmp1 simd16 // +--* opN T // +--* idx int // node = * HWINTRINSIC simd16 T Insert // This is roughly the following managed code: // ... // tmp1 = Sse?.Insert(tmp1, opN, N); // ... opN = node->Op(argCnt); idx = comp->gtNewIconNode(N, TYP_INT); BlockRange().InsertAfter(opN, idx); node->ResetHWIntrinsicId(insIntrinsic, comp, tmp1, opN, idx); break; } assert((simdBaseType != TYP_SHORT) && (simdBaseType != TYP_USHORT)); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); GenTree* op[16]; op[0] = tmp1; for (N = 1; N < argCnt; N++) { opN = node->Op(N + 1); op[N] = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(opN, op[N]); LowerNode(op[N]); } if ((simdBaseType == TYP_BYTE) || (simdBaseType == TYP_UBYTE)) { for (N = 0; N < argCnt; N += 4) { // We will be constructing the following parts: // ... 
// /--* opN T // opN = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opO T // opO = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opN simd16 // +--* opO simd16 // tmp1 = * HWINTRINSIC simd16 T UnpackLow // /--* opP T // opP = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opQ T // opQ = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opP simd16 // +--* opQ simd16 // tmp2 = * HWINTRINSIC simd16 T UnpackLow // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp3 = * HWINTRINSIC simd16 T UnpackLow // ... // This is roughly the following managed code: // ... // tmp1 = Sse2.UnpackLow(opN, opO); // tmp2 = Sse2.UnpackLow(opP, opQ); // tmp3 = Sse2.UnpackLow(tmp1, tmp2); // ... unsigned O = N + 1; unsigned P = N + 2; unsigned Q = N + 3; tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op[N], op[O], NI_SSE2_UnpackLow, CORINFO_TYPE_UBYTE, simdSize); BlockRange().InsertAfter(op[O], tmp1); LowerNode(tmp1); tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, op[P], op[Q], NI_SSE2_UnpackLow, CORINFO_TYPE_UBYTE, simdSize); BlockRange().InsertAfter(op[Q], tmp2); LowerNode(tmp2); tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_SSE2_UnpackLow, CORINFO_TYPE_USHORT, simdSize); BlockRange().InsertAfter(tmp2, tmp3); LowerNode(tmp3); // This caches the result in index 0 through 3, depending on which // loop iteration this is and allows the rest of the logic to be // shared with the TYP_INT and TYP_UINT path. op[N / 4] = tmp3; } } // We will be constructing the following parts: // ... // /--* opN T // opN = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opO T // opO = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opN simd16 // +--* opO simd16 // tmp1 = * HWINTRINSIC simd16 T UnpackLow // /--* opP T // opP = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opQ T // opQ = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opP simd16 // +--* opQ simd16 // tmp2 = * HWINTRINSIC simd16 T UnpackLow // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 T UnpackLow // This is roughly the following managed code: // ... // tmp1 = Sse2.UnpackLow(opN, opO); // tmp2 = Sse2.UnpackLow(opP, opQ); // return Sse2.UnpackLow(tmp1, tmp2); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op[0], op[1], NI_SSE2_UnpackLow, CORINFO_TYPE_UINT, simdSize); BlockRange().InsertAfter(op[1], tmp1); LowerNode(tmp1); tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, op[2], op[3], NI_SSE2_UnpackLow, CORINFO_TYPE_UINT, simdSize); BlockRange().InsertAfter(op[3], tmp2); LowerNode(tmp2); node->ResetHWIntrinsicId(NI_SSE2_UnpackLow, tmp1, tmp2); node->SetSimdBaseJitType(CORINFO_TYPE_ULONG); break; } #if defined(TARGET_AMD64) case TYP_LONG: case TYP_ULONG: { if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE41_X64)) { // We will be constructing the following parts: // ... // idx = CNS_INT int 1 // /--* tmp1 simd16 // +--* op2 T // +--* idx int // node = * HWINTRINSIC simd16 T Insert // This is roughly the following managed code: // ... // return Sse41.X64.Insert(tmp1, op2, 0x01); idx = comp->gtNewIconNode(0x01, TYP_INT); BlockRange().InsertBefore(node, idx); node->ResetHWIntrinsicId(NI_SSE41_X64_Insert, comp, tmp1, op2, idx); break; } // We will be constructing the following parts: // ... // /--* op2 T // tmp2 = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 T UnpackLow // This is roughly the following managed code: // ... 
// var tmp2 = Vector128.CreateScalarUnsafe(op2); // return Sse2.UnpackLow(tmp1, tmp2); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(op2, tmp2); LowerNode(tmp2); node->ResetHWIntrinsicId(NI_SSE2_UnpackLow, tmp1, tmp2); break; } #endif // TARGET_AMD64 case TYP_FLOAT: { unsigned N = 0; GenTree* opN = nullptr; if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { for (N = 1; N < argCnt - 1; N++) { // We will be constructing the following parts: // ... // // /--* opN T // tmp2 = * HWINTRINSIC simd16 T CreateScalarUnsafe // idx = CNS_INT int N // /--* tmp1 simd16 // +--* opN T // +--* idx int // tmp1 = * HWINTRINSIC simd16 T Insert // ... // This is roughly the following managed code: // ... // tmp2 = Vector128.CreateScalarUnsafe(opN); // tmp1 = Sse41.Insert(tmp1, tmp2, N << 4); // ... opN = node->Op(N + 1); tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(opN, tmp2); LowerNode(tmp2); idx = comp->gtNewIconNode(N << 4, TYP_INT); BlockRange().InsertAfter(tmp2, idx); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, idx, NI_SSE41_Insert, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp1); LowerNode(tmp1); } // We will be constructing the following parts: // ... // // /--* opN T // tmp2 = * HWINTRINSIC simd16 T CreateScalarUnsafe // idx = CNS_INT int N // /--* tmp1 simd16 // +--* opN T // +--* idx int // node = * HWINTRINSIC simd16 T Insert // This is roughly the following managed code: // ... // tmp2 = Vector128.CreateScalarUnsafe(opN); // return Sse41.Insert(tmp1, tmp2, N << 4); opN = node->Op(argCnt); tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(opN, tmp2); LowerNode(tmp2); idx = comp->gtNewIconNode((argCnt - 1) << 4, TYP_INT); BlockRange().InsertAfter(tmp2, idx); node->ResetHWIntrinsicId(NI_SSE41_Insert, comp, tmp1, tmp2, idx); break; } // We will be constructing the following parts: // ... // /--* opN T // opN = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opO T // opO = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opN simd16 // +--* opO simd16 // tmp1 = * HWINTRINSIC simd16 T UnpackLow // /--* opP T // opP = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opQ T // opQ = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* opP simd16 // +--* opQ simd16 // tmp2 = * HWINTRINSIC simd16 T UnpackLow // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 T MoveLowToHigh // This is roughly the following managed code: // ... 
// tmp1 = Sse.UnpackLow(opN, opO); // tmp2 = Sse.UnpackLow(opP, opQ); // return Sse.MoveLowToHigh(tmp1, tmp2); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE)); GenTree* op[4]; op[0] = tmp1; for (N = 1; N < argCnt; N++) { opN = node->Op(N + 1); op[N] = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(opN, op[N]); LowerNode(op[N]); } tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op[0], op[1], NI_SSE_UnpackLow, simdBaseJitType, simdSize); BlockRange().InsertAfter(op[1], tmp1); LowerNode(tmp1); tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, op[2], op[3], NI_SSE_UnpackLow, simdBaseJitType, simdSize); BlockRange().InsertAfter(op[3], tmp2); LowerNode(tmp2); node->ResetHWIntrinsicId(NI_SSE_MoveLowToHigh, tmp1, tmp2); break; } case TYP_DOUBLE: { // We will be constructing the following parts: // ... // /--* op2 T // tmp2 = * HWINTRINSIC simd16 T CreateScalarUnsafe // /--* tmp1 simd16 // +--* tmp2 simd16 // node = * HWINTRINSIC simd16 T MoveLowToHigh // This is roughly the following managed code: // ... // var tmp2 = Vector128.CreateScalarUnsafe(op2); // return Sse.MoveLowToHigh(tmp1, tmp2); assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16); BlockRange().InsertAfter(op2, tmp2); LowerNode(tmp2); node->ResetHWIntrinsicId(NI_SSE_MoveLowToHigh, tmp1, tmp2); node->SetSimdBaseJitType(CORINFO_TYPE_FLOAT); break; } default: { unreached(); } } } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsicGetElement: Lowers a Vector128 or Vector256 GetElement call // // Arguments: // node - The hardware intrinsic node. // void Lowering::LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); var_types simdType = node->gtType; CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); assert(!varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); GenTree* op1 = node->Op(1); GenTree* op2 = node->Op(2); if (op1->OperIs(GT_IND)) { // If the vector is already in memory, we force its // addr to be evaluated into a reg. This would allow // us to generate [regBase] or [regBase + offset] or // [regBase + sizeOf(simdBaseType) * regIndex] to access // the required vector element directly from memory. // // TODO-CQ-XARCH: If addr of GT_IND is GT_LEA, we // might be able update GT_LEA to fold the regIndex // or offset in some cases. Instead with this // approach we always evaluate GT_LEA into a reg. // Ideally, we should be able to lower GetItem intrinsic // into GT_IND(newAddr) where newAddr combines // the addr of the vector with the given index. op1->gtFlags |= GTF_IND_REQ_ADDR_IN_REG; } if (!op2->OperIsConst()) { // We will specially handle GetElement in codegen when op2 isn't a constant return; } // We should have a bounds check inserted for any index outside the allowed range // but we need to generate some code anyways, and so we'll simply mask here for simplicity. 
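    // For example, for a Vector128<int> (count == 4) a constant index of 6 is masked below to
    // 6 % 4 == 2 rather than producing an out-of-range extract.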
ssize_t count = simdSize / genTypeSize(simdBaseType); ssize_t imm8 = static_cast<uint8_t>(op2->AsIntCon()->IconValue()) % count; assert(0 <= imm8 && imm8 < count); if (IsContainableMemoryOp(op1) && IsSafeToContainMem(node, op1)) { // We will specially handle GetElement in codegen when op1 is already in memory op2->AsIntCon()->SetIconValue(imm8); return; } switch (simdBaseType) { // Using software fallback if simdBaseType is not supported by hardware case TYP_BYTE: case TYP_UBYTE: case TYP_INT: case TYP_UINT: assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE41)); break; case TYP_LONG: case TYP_ULONG: // We either support TYP_LONG or we have been decomposed into two TYP_INT inserts assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE41_X64)); break; case TYP_DOUBLE: case TYP_FLOAT: case TYP_SHORT: case TYP_USHORT: assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); break; default: unreached(); } // Remove the index node up front to simplify downstream logic BlockRange().Remove(op2); // Spare GenTrees to be used for the lowering logic below // Defined upfront to avoid naming conflicts, etc... GenTree* idx = nullptr; GenTree* tmp1 = nullptr; GenTree* tmp2 = nullptr; if (intrinsicId == NI_Vector256_GetElement) { assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX)); if (imm8 >= count / 2) { // idx = CNS_INT int 1 // /--* op1 simd32 // +--* idx int // op1 = * HWINTRINSIC simd32 T ExtractVector128 // This is roughly the following managed code: // ... // op1 = Avx.ExtractVector128(op1, 0x01); imm8 -= count / 2; idx = comp->gtNewIconNode(1); BlockRange().InsertBefore(node, idx); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, idx, NI_AVX_ExtractVector128, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp1); LowerNode(tmp1); } else { // /--* op1 simd32 // op1 = * HWINTRINSIC simd32 T GetLower // This is roughly the following managed code: // ... 
// op1 = op1.GetLower(); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector256_GetLower, simdBaseJitType, 16); BlockRange().InsertBefore(node, tmp1); LowerNode(tmp1); } op1 = tmp1; } NamedIntrinsic resIntrinsic = NI_Illegal; if (imm8 == 0 && (genTypeSize(simdBaseType) >= 4)) { switch (simdBaseType) { case TYP_LONG: resIntrinsic = NI_SSE2_X64_ConvertToInt64; break; case TYP_ULONG: resIntrinsic = NI_SSE2_X64_ConvertToUInt64; break; case TYP_INT: resIntrinsic = NI_SSE2_ConvertToInt32; break; case TYP_UINT: resIntrinsic = NI_SSE2_ConvertToUInt32; break; case TYP_FLOAT: case TYP_DOUBLE: resIntrinsic = NI_Vector128_ToScalar; break; default: unreached(); } node->ResetHWIntrinsicId(resIntrinsic, op1); } else { op2 = comp->gtNewIconNode(imm8); BlockRange().InsertBefore(node, op2); switch (simdBaseType) { case TYP_LONG: case TYP_ULONG: { resIntrinsic = NI_SSE41_X64_Extract; break; } case TYP_FLOAT: case TYP_DOUBLE: { // We specially handle float and double for more efficient codegen resIntrinsic = NI_Vector128_GetElement; break; } case TYP_BYTE: case TYP_UBYTE: case TYP_INT: case TYP_UINT: { resIntrinsic = NI_SSE41_Extract; break; } case TYP_SHORT: case TYP_USHORT: { resIntrinsic = NI_SSE2_Extract; break; } default: unreached(); } node->ResetHWIntrinsicId(resIntrinsic, op1, op2); } node->SetSimdSize(16); if (!varTypeIsFloating(simdBaseType)) { assert(node->GetHWIntrinsicId() != intrinsicId); LowerNode(node); } if ((simdBaseType == TYP_BYTE) || (simdBaseType == TYP_SHORT)) { // The intrinsic zeros the upper bits, so we need an explicit // cast to ensure the result is properly sign extended LIR::Use use; bool foundUse = BlockRange().TryGetUse(node, &use); GenTreeCast* cast = comp->gtNewCastNode(TYP_INT, node, /* isUnsigned */ true, simdBaseType); BlockRange().InsertAfter(node, cast); if (foundUse) { use.ReplaceWith(cast); } LowerNode(cast); } } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsicWithElement: Lowers a Vector128 or Vector256 WithElement call // // Arguments: // node - The hardware intrinsic node. // void Lowering::LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); var_types simdType = node->TypeGet(); CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); GenTree* op1 = node->Op(1); GenTree* op2 = node->Op(2); GenTree* op3 = node->Op(3); assert(op2->OperIsConst()); ssize_t imm8 = op2->AsIntCon()->IconValue(); ssize_t cachedImm8 = imm8; ssize_t count = simdSize / genTypeSize(simdBaseType); assert(0 <= imm8 && imm8 < count); switch (simdBaseType) { // Using software fallback if simdBaseType is not supported by hardware case TYP_BYTE: case TYP_UBYTE: case TYP_INT: case TYP_UINT: assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE41)); break; case TYP_LONG: case TYP_ULONG: assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE41_X64)); break; case TYP_DOUBLE: case TYP_FLOAT: case TYP_SHORT: case TYP_USHORT: assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); break; default: unreached(); } // Remove the index node up front to simplify downstream logic BlockRange().Remove(op2); // Spare GenTrees to be used for the lowering logic below // Defined upfront to avoid naming conflicts, etc... 
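    // For example: WithElement on a Vector256<float> with index 6 is lowered below by extracting the
    // upper Vector128, inserting the new value at element 2 of that half, and then re-inserting the
    // modified half into the original 256-bit value.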
GenTree* idx = nullptr; GenTree* tmp1 = nullptr; GenTree* tmp2 = nullptr; GenTreeHWIntrinsic* result = node; // If we have a simd32 WithElement, we will spill the original // simd32 source into a local, extract the lower/upper half from // it and then operate on that. At the end, we will insert the simd16 // result back into the simd32 local, producing our final value. if (intrinsicId == NI_Vector256_WithElement) { assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX)); // This copy of "node" will have the simd16 value we need. result = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, intrinsicId, simdBaseJitType, 16); BlockRange().InsertBefore(node, result); // We will be constructing the following parts: // ... // /--* op1 simd32 // * STORE_LCL_VAR simd32 // tmp32 = LCL_VAR simd32 // op1 = LCL_VAR simd32 // TODO-CQ: move the tmp32 node closer to the final InsertVector128. LIR::Use op1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(op1Use); GenTree* tmp32 = node->Op(1); op1 = comp->gtClone(tmp32); BlockRange().InsertBefore(op3, op1); if (imm8 >= count / 2) { // We will be constructing the following parts: // ... // idx = CNS_INT int 1 // /--* op1 simd32 // +--* idx int // op1 = * HWINTRINSIC simd32 T ExtractVector128 // This is roughly the following managed code: // ... // op1 = Avx.ExtractVector128(op1, 0x01); imm8 -= count / 2; idx = comp->gtNewIconNode(1); BlockRange().InsertAfter(op1, idx); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, idx, NI_AVX_ExtractVector128, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp1); LowerNode(tmp1); } else { // We will be constructing the following parts: // ... // /--* op1 simd32 // op1 = * HWINTRINSIC simd32 T GetLower // This is roughly the following managed code: // ... // op1 = op1.GetLower(); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector256_GetLower, simdBaseJitType, simdSize); BlockRange().InsertAfter(op1, tmp1); LowerNode(tmp1); } op1 = tmp1; // Now we will insert our "result" into our simd32 temporary. idx = comp->gtNewIconNode((cachedImm8 >= count / 2) ? 1 : 0); BlockRange().InsertBefore(node, idx); node->ChangeHWIntrinsicId(NI_AVX_InsertVector128, tmp32, result, idx); } switch (simdBaseType) { case TYP_LONG: case TYP_ULONG: { idx = comp->gtNewIconNode(imm8); BlockRange().InsertBefore(result, idx); result->ChangeHWIntrinsicId(NI_SSE41_X64_Insert, op1, op3, idx); break; } case TYP_FLOAT: { // We will be constructing the following parts: // ... // /--* op3 float // tmp1 = * HWINTRINSIC simd16 T CreateScalarUnsafe // This is roughly the following managed code: // ... // tmp1 = Vector128.CreateScalarUnsafe(op3); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op3, NI_Vector128_CreateScalarUnsafe, CORINFO_TYPE_FLOAT, 16); BlockRange().InsertBefore(result, tmp1); LowerNode(tmp1); if (!comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { if (imm8 == 0) { // We will be constructing the following parts: // ... // /--* op1 simd16 // +--* op2 simd16 // node = * HWINTRINSIC simd16 T MoveScalar // This is roughly the following managed code: // ... // node = Sse.MoveScalar(op1, op2); result->ResetHWIntrinsicId(NI_SSE_MoveScalar, op1, tmp1); } else { // We will be constructing the following parts: // ... 
// /--* op1 simd16 // * STORE_LCL_VAR simd16 // op2 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // idx = CNS_INT int 0 // /--* tmp1 simd16 // +--* tmp2 simd16 // +--* idx int // op1 = * HWINTRINSIC simd16 T Shuffle // idx = CNS_INT int 226 // /--* op1 simd16 // +--* tmp2 simd16 // +--* idx int // op1 = * HWINTRINSIC simd16 T Shuffle // This is roughly the following managed code: // ... // tmp2 = Sse.Shuffle(tmp1, op1, 0 or 48 or 32); // node = Sse.Shuffle(tmp2, op1, 226 or 132 or 36); result->Op(1) = op1; LIR::Use op1Use(BlockRange(), &result->Op(1), result); ReplaceWithLclVar(op1Use); op2 = result->Op(1); tmp2 = comp->gtClone(op2); BlockRange().InsertAfter(tmp1, tmp2); ssize_t controlBits1; ssize_t controlBits2; // The comments beside the control bits below are listed using the managed API operands // // In practice, for the first step the value being inserted (op3) is in tmp1 // while the other elements of the result (op1) are in tmp2. The result ends // up containing the value being inserted and its immediate neighbor. // // The second step takes that result (which is in op1) plus the other elements // from op2 (a clone of op1/tmp2 from the previous step) and combines them to // create the final result. switch (imm8) { case 1: { controlBits1 = 0; // 00 00 00 00; op1 = { X = op3, Y = op3, Z = op1.X, W = op1.X } controlBits2 = 226; // 11 10 00 10; node = { X = op1.X, Y = op3, Z = op1.Z, W = op1.W } break; } case 2: { controlBits1 = 15; // 00 00 11 11; op1 = { X = op1.W, Y = op1.W, Z = op3, W = op3 } controlBits2 = 36; // 00 10 01 00; node = { X = op1.X, Y = op1.Y, Z = op3, W = op1.W } break; } case 3: { controlBits1 = 10; // 00 00 10 10; op1 = { X = op1.Z, Y = op1.Z, Z = op3, W = op3 } controlBits2 = 132; // 10 00 01 00; node = { X = op1.X, Y = op1.Y, Z = op1.Z, W = op3 } break; } default: unreached(); } idx = comp->gtNewIconNode(controlBits1); BlockRange().InsertAfter(tmp2, idx); if (imm8 != 1) { std::swap(tmp1, tmp2); } op1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, idx, NI_SSE_Shuffle, CORINFO_TYPE_FLOAT, 16); BlockRange().InsertAfter(idx, op1); LowerNode(op1); idx = comp->gtNewIconNode(controlBits2); BlockRange().InsertAfter(op1, idx); if (imm8 != 1) { std::swap(op1, op2); } result->ChangeHWIntrinsicId(NI_SSE_Shuffle, op1, op2, idx); } break; } else { imm8 = imm8 * 16; op3 = tmp1; FALLTHROUGH; } } case TYP_BYTE: case TYP_UBYTE: case TYP_INT: case TYP_UINT: { idx = comp->gtNewIconNode(imm8); BlockRange().InsertBefore(result, idx); result->ChangeHWIntrinsicId(NI_SSE41_Insert, op1, op3, idx); break; } case TYP_SHORT: case TYP_USHORT: { idx = comp->gtNewIconNode(imm8); BlockRange().InsertBefore(result, idx); result->ChangeHWIntrinsicId(NI_SSE2_Insert, op1, op3, idx); break; } case TYP_DOUBLE: { // We will be constructing the following parts: // ... // /--* op3 double // tmp1 = * HWINTRINSIC simd16 T CreateScalarUnsafe // This is roughly the following managed code: // ... // tmp1 = Vector128.CreateScalarUnsafe(op3); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op3, NI_Vector128_CreateScalarUnsafe, CORINFO_TYPE_DOUBLE, 16); BlockRange().InsertBefore(result, tmp1); LowerNode(tmp1); result->ResetHWIntrinsicId((imm8 == 0) ? NI_SSE2_MoveScalar : NI_SSE2_UnpackLow, op1, tmp1); break; } default: unreached(); } assert(result->GetHWIntrinsicId() != intrinsicId); LowerNode(result); if (intrinsicId == NI_Vector256_WithElement) { // Now that we have finalized the shape of the tree, lower the insertion node as well. 
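        // Here "node" is the outer NI_AVX_InsertVector128 that merges the updated 128-bit half back
        // into the spilled 256-bit value, while "result" is the 128-bit insertion lowered above.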
assert(node->GetHWIntrinsicId() == NI_AVX_InsertVector128); assert(node != result); LowerNode(node); } } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsicDot: Lowers a Vector128 or Vector256 Dot call // // Arguments: // node - The hardware intrinsic node. // void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); var_types simdType = Compiler::getSIMDTypeForSize(simdSize); unsigned simd16Count = comp->getSIMDVectorLength(16, simdBaseType); assert((intrinsicId == NI_Vector128_Dot) || (intrinsicId == NI_Vector256_Dot)); assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); GenTree* op1 = node->Op(1); GenTree* op2 = node->Op(2); // Spare GenTrees to be used for the lowering logic below // Defined upfront to avoid naming conflicts, etc... GenTree* idx = nullptr; GenTree* tmp1 = nullptr; GenTree* tmp2 = nullptr; GenTree* tmp3 = nullptr; NamedIntrinsic multiply = NI_Illegal; NamedIntrinsic horizontalAdd = NI_Illegal; NamedIntrinsic add = NI_Illegal; NamedIntrinsic shuffle = NI_Illegal; if (simdSize == 32) { switch (simdBaseType) { case TYP_SHORT: case TYP_USHORT: case TYP_INT: case TYP_UINT: { assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX2)); multiply = NI_AVX2_MultiplyLow; horizontalAdd = NI_AVX2_HorizontalAdd; add = NI_AVX2_Add; break; } case TYP_FLOAT: { assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX)); // We will be constructing the following parts: // idx = CNS_INT int 0xF1 // /--* op1 simd16 // +--* op2 simd16 // +--* idx int // tmp1 = * HWINTRINSIC simd16 T DotProduct // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // idx = CNS_INT int 0x01 // /--* tmp2 simd16 // +--* idx int // tmp2 = * HWINTRINSIC simd16 T ExtractVector128 // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp3 = * HWINTRINSIC simd16 T Add // /--* tmp3 simd16 // node = * HWINTRINSIC simd16 T ToScalar // This is roughly the following managed code: // var tmp1 = Avx.DotProduct(op1, op2, 0xFF); // var tmp2 = Avx.ExtractVector128(tmp1, 0x01); // var tmp3 = Sse.Add(tmp1, tmp2); // return tmp3.ToScalar(); idx = comp->gtNewIconNode(0xF1, TYP_INT); BlockRange().InsertBefore(node, idx); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_AVX_DotProduct, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp1); LowerNode(tmp1); node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); idx = comp->gtNewIconNode(0x01, TYP_INT); BlockRange().InsertAfter(tmp2, idx); tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp2, idx, NI_AVX_ExtractVector128, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp2); LowerNode(tmp2); tmp3 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, NI_SSE_Add, simdBaseJitType, 16); BlockRange().InsertAfter(tmp2, tmp3); LowerNode(tmp3); node->SetSimdSize(16); node->ResetHWIntrinsicId(NI_Vector128_ToScalar, tmp3); LowerNode(node); return; } case TYP_DOUBLE: { assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX)); multiply = NI_AVX_Multiply; horizontalAdd = NI_AVX_HorizontalAdd; add = NI_AVX_Add; break; } default: { unreached(); } } } else { 
assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2)); switch (simdBaseType) { case TYP_SHORT: case TYP_USHORT: { multiply = NI_SSE2_MultiplyLow; horizontalAdd = NI_SSSE3_HorizontalAdd; add = NI_SSE2_Add; if (!comp->compOpportunisticallyDependsOn(InstructionSet_SSSE3)) { shuffle = NI_SSE2_ShuffleLow; } break; } case TYP_INT: case TYP_UINT: { multiply = NI_SSE41_MultiplyLow; horizontalAdd = NI_SSSE3_HorizontalAdd; add = NI_SSE2_Add; assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE41)); break; } case TYP_FLOAT: { if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { // We will be constructing the following parts: // idx = CNS_INT int 0xFF // /--* op1 simd16 // +--* op2 simd16 // +--* idx int // tmp3 = * HWINTRINSIC simd16 T DotProduct // /--* tmp3 simd16 // node = * HWINTRINSIC simd16 T ToScalar // This is roughly the following managed code: // var tmp3 = Avx.DotProduct(op1, op2, 0xFF); // return tmp3.ToScalar(); if (simdSize == 8) { idx = comp->gtNewIconNode(0x31, TYP_INT); } else if (simdSize == 12) { idx = comp->gtNewIconNode(0x71, TYP_INT); } else { assert(simdSize == 16); idx = comp->gtNewIconNode(0xF1, TYP_INT); } BlockRange().InsertBefore(node, idx); tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_SSE41_DotProduct, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp3); LowerNode(tmp3); node->ResetHWIntrinsicId(NI_Vector128_ToScalar, tmp3); LowerNode(node); return; } multiply = NI_SSE_Multiply; horizontalAdd = NI_SSE3_HorizontalAdd; add = NI_SSE_Add; if (!comp->compOpportunisticallyDependsOn(InstructionSet_SSE3)) { shuffle = NI_SSE_Shuffle; } break; } case TYP_DOUBLE: { if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { // We will be constructing the following parts: // idx = CNS_INT int 0x31 // /--* op1 simd16 // +--* op2 simd16 // +--* idx int // tmp3 = * HWINTRINSIC simd16 T DotProduct // /--* tmp3 simd16 // node = * HWINTRINSIC simd16 T ToScalar // This is roughly the following managed code: // var tmp3 = Avx.DotProduct(op1, op2, 0x31); // return tmp3.ToScalar(); idx = comp->gtNewIconNode(0x31, TYP_INT); BlockRange().InsertBefore(node, idx); tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_SSE41_DotProduct, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp3); LowerNode(tmp3); node->ResetHWIntrinsicId(NI_Vector128_ToScalar, tmp3); LowerNode(node); return; } multiply = NI_SSE2_Multiply; horizontalAdd = NI_SSE3_HorizontalAdd; add = NI_SSE2_Add; if (!comp->compOpportunisticallyDependsOn(InstructionSet_SSE3)) { shuffle = NI_SSE2_Shuffle; } break; } default: { unreached(); } } if (simdSize == 8) { assert(simdBaseType == TYP_FLOAT); // If simdSize == 8 then we have only two elements, not the 4 that we got from getSIMDVectorLength, // which we gave a simdSize of 16. So, we set the simd16Count to 2 so that only 1 hadd will // be emitted rather than 2, so that the upper two elements will be ignored. simd16Count = 2; } else if (simdSize == 12) { assert(simdBaseType == TYP_FLOAT); // We will be constructing the following parts: // ... // +--* CNS_INT int -1 // +--* CNS_INT int -1 // +--* CNS_INT int -1 // +--* CNS_INT int 0 // tmp1 = * HWINTRINSIC simd16 T Create // /--* op2 simd16 // +--* tmp1 simd16 // op1 = * HWINTRINSIC simd16 T And // ... // This is roughly the following managed code: // ... // tmp1 = Vector128.Create(-1, -1, -1, 0); // op1 = Sse.And(op1, tmp2); // ... 
GenTree* cns0 = comp->gtNewIconNode(-1, TYP_INT); BlockRange().InsertAfter(op1, cns0); GenTree* cns1 = comp->gtNewIconNode(-1, TYP_INT); BlockRange().InsertAfter(cns0, cns1); GenTree* cns2 = comp->gtNewIconNode(-1, TYP_INT); BlockRange().InsertAfter(cns1, cns2); GenTree* cns3 = comp->gtNewIconNode(0, TYP_INT); BlockRange().InsertAfter(cns2, cns3); tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, cns0, cns1, cns2, cns3, NI_Vector128_Create, CORINFO_TYPE_INT, 16); BlockRange().InsertAfter(cns3, tmp1); LowerNode(tmp1); op1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, tmp1, NI_SSE_And, simdBaseJitType, simdSize); BlockRange().InsertAfter(tmp1, op1); LowerNode(op1); } } // We will be constructing the following parts: // /--* op1 simd16 // +--* op2 simd16 // tmp1 = * HWINTRINSIC simd16 T Multiply // ... // This is roughly the following managed code: // var tmp1 = Isa.Multiply(op1, op2); // ... tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, multiply, simdBaseJitType, simdSize); BlockRange().InsertBefore(node, tmp1); LowerNode(tmp1); // HorizontalAdd combines pairs so we need log2(simd16Count) passes to sum all elements together. int haddCount = genLog2(simd16Count); for (int i = 0; i < haddCount; i++) { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // ... // This is roughly the following managed code: // ... // tmp2 = tmp1; // ... node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); if (shuffle == NI_Illegal) { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp1 = * HWINTRINSIC simd16 T HorizontalAdd // ... // This is roughly the following managed code: // ... // tmp1 = Isa.HorizontalAdd(tmp1, tmp2); // ... tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, horizontalAdd, simdBaseJitType, simdSize); } else { int shuffleConst = 0x00; switch (i) { case 0: { assert((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT) || varTypeIsFloating(simdBaseType)); // Adds (e0 + e1, e1 + e0, e2 + e3, e3 + e2), giving: // e0, e1, e2, e3 | e4, e5, e6, e7 // e1, e0, e3, e2 | e5, e4, e7, e6 // ... shuffleConst = 0xB1; break; } case 1: { assert((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT) || (simdBaseType == TYP_FLOAT)); // Adds (e0 + e2, e1 + e3, e2 + e0, e3 + e1), giving: // ... // e2, e3, e0, e1 | e6, e7, e4, e5 // e3, e2, e1, e0 | e7, e6, e5, e4 shuffleConst = 0x4E; break; } case 2: { assert((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT)); // Adds (e0 + e4, e1 + e5, e2 + e6, e3 + e7), giving: // ... // e4, e5, e6, e7 | e0, e1, e2, e3 // e5, e4, e7, e6 | e1, e0, e3, e2 // e6, e7, e4, e5 | e2, e3, e0, e1 // e7, e6, e5, e4 | e3, e2, e1, e0 shuffleConst = 0x4E; break; } default: { unreached(); } } idx = comp->gtNewIconNode(shuffleConst, TYP_INT); BlockRange().InsertAfter(tmp2, idx); if (varTypeIsFloating(simdBaseType)) { // We will be constructing the following parts: // ... // /--* tmp2 simd16 // * STORE_LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // tmp3 = LCL_VAR simd16 // idx = CNS_INT int shuffleConst // /--* tmp2 simd16 // +--* tmp3 simd16 // +--* idx simd16 // tmp2 = * HWINTRINSIC simd16 T Shuffle // ... // This is roughly the following managed code: // ... // tmp3 = tmp2; // tmp2 = Isa.Shuffle(tmp2, tmp3, shuffleConst); // ... 
node->Op(1) = tmp2; LIR::Use tmp2Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp2Use); tmp2 = node->Op(1); tmp3 = comp->gtClone(tmp2); BlockRange().InsertAfter(tmp2, tmp3); tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, tmp3, idx, shuffle, simdBaseJitType, simdSize); } else { assert((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT)); if (i < 2) { // We will be constructing the following parts: // ... // idx = CNS_INT int shuffleConst // /--* tmp2 simd16 // +--* idx simd16 // tmp2 = * HWINTRINSIC simd16 T ShuffleLow // idx = CNS_INT int shuffleConst // /--* tmp2 simd16 // +--* idx simd16 // tmp2 = * HWINTRINSIC simd16 T ShuffleHigh // ... // This is roughly the following managed code: // ... // tmp2 = Isa.Shuffle(tmp1, shuffleConst); // ... tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_SSE2_ShuffleLow, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp2); LowerNode(tmp2); idx = comp->gtNewIconNode(shuffleConst, TYP_INT); BlockRange().InsertAfter(tmp2, idx); tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_SSE2_ShuffleHigh, simdBaseJitType, simdSize); } else { assert(i == 2); // We will be constructing the following parts: // ... // idx = CNS_INT int shuffleConst // /--* tmp2 simd16 // +--* idx simd16 // tmp2 = * HWINTRINSIC simd16 T ShuffleLow // ... // This is roughly the following managed code: // ... // tmp2 = Isa.Shuffle(tmp1, shuffleConst); // ... tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_SSE2_Shuffle, CORINFO_TYPE_INT, simdSize); } } BlockRange().InsertAfter(idx, tmp2); LowerNode(tmp2); // We will be constructing the following parts: // ... // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp1 = * HWINTRINSIC simd16 T Add // ... // This is roughly the following managed code: // ... // tmp1 = Isa.Add(tmp1, tmp2); // ... tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, add, simdBaseJitType, simdSize); } BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); } if (simdSize == 32) { // We will be constructing the following parts: // ... // /--* tmp1 simd16 // * STORE_LCL_VAR simd16 // tmp1 = LCL_VAR simd16 // tmp2 = LCL_VAR simd16 // idx = CNS_INT int 0x01 // /--* tmp2 simd16 // +--* idx int // tmp2 = * HWINTRINSIC simd16 T ExtractVector128 // /--* tmp1 simd16 // +--* tmp2 simd16 // tmp1 = * HWINTRINSIC simd16 T Add // ... // This is roughly the following managed code: // ... // var tmp2 = tmp1; // tmp2 = Avx.ExtractVector128(tmp2, 0x01); // var tmp1 = Isa.Add(tmp1, tmp2); // ... node->Op(1) = tmp1; LIR::Use tmp1Use(BlockRange(), &node->Op(1), node); ReplaceWithLclVar(tmp1Use); tmp1 = node->Op(1); tmp2 = comp->gtClone(tmp1); BlockRange().InsertAfter(tmp1, tmp2); idx = comp->gtNewIconNode(0x01, TYP_INT); BlockRange().InsertAfter(tmp2, idx); tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp2, idx, NI_AVX_ExtractVector128, simdBaseJitType, simdSize); BlockRange().InsertAfter(idx, tmp2); LowerNode(tmp2); tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, add, simdBaseJitType, 16); BlockRange().InsertAfter(tmp2, tmp1); LowerNode(tmp1); node->SetSimdSize(16); } // We will be constructing the following parts: // ... // /--* tmp1 simd16 // node = * HWINTRINSIC simd16 T ToScalar // This is roughly the following managed code: // ... 
// return tmp1.ToScalar(); node->ResetHWIntrinsicId(NI_Vector128_ToScalar, tmp1); LowerNode(node); } //---------------------------------------------------------------------------------------------- // Lowering::LowerHWIntrinsicToScalar: Lowers a Vector128 or Vector256 ToScalar call // // Arguments: // node - The hardware intrinsic node. // void Lowering::LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); unsigned simdSize = node->GetSimdSize(); var_types simdType = Compiler::getSIMDTypeForSize(simdSize); assert((intrinsicId == NI_Vector128_ToScalar) || (intrinsicId == NI_Vector256_ToScalar)); assert(varTypeIsSIMD(simdType)); assert(varTypeIsArithmetic(simdBaseType)); assert(simdSize != 0); switch (simdBaseType) { case TYP_BYTE: case TYP_SHORT: case TYP_INT: { node->gtType = TYP_INT; node->SetSimdBaseJitType(CORINFO_TYPE_INT); node->ChangeHWIntrinsicId(NI_SSE2_ConvertToInt32); break; } case TYP_UBYTE: case TYP_USHORT: case TYP_UINT: { node->gtType = TYP_UINT; node->SetSimdBaseJitType(CORINFO_TYPE_UINT); node->ChangeHWIntrinsicId(NI_SSE2_ConvertToUInt32); break; } #if defined(TARGET_AMD64) case TYP_LONG: { node->ChangeHWIntrinsicId(NI_SSE2_X64_ConvertToInt64); break; } case TYP_ULONG: { node->ChangeHWIntrinsicId(NI_SSE2_X64_ConvertToUInt64); break; } #endif // TARGET_AMD64 case TYP_FLOAT: case TYP_DOUBLE: { ContainCheckHWIntrinsic(node); return; } default: { unreached(); } } LowerNode(node); if (genTypeSize(simdBaseType) < 4) { LIR::Use use; bool foundUse = BlockRange().TryGetUse(node, &use); GenTreeCast* cast = comp->gtNewCastNode(simdBaseType, node, node->IsUnsigned(), simdBaseType); BlockRange().InsertAfter(node, cast); if (foundUse) { use.ReplaceWith(cast); } LowerNode(cast); } } //---------------------------------------------------------------------------------------------- // Lowering::TryLowerAndOpToResetLowestSetBit: Lowers a tree AND(X, ADD(X, -1)) to HWIntrinsic::ResetLowestSetBit // // Arguments: // andNode - GT_AND node of integral type // // Return Value: // Returns the replacement node if one is created else nullptr indicating no replacement // // Notes: // Performs containment checks on the replacement node if one is created GenTree* Lowering::TryLowerAndOpToResetLowestSetBit(GenTreeOp* andNode) { assert(andNode->OperIs(GT_AND) && varTypeIsIntegral(andNode)); GenTree* op1 = andNode->gtGetOp1(); if (!op1->OperIs(GT_LCL_VAR) || comp->lvaGetDesc(op1->AsLclVar())->IsAddressExposed()) { return nullptr; } GenTree* op2 = andNode->gtGetOp2(); if (!op2->OperIs(GT_ADD)) { return nullptr; } GenTree* addOp2 = op2->gtGetOp2(); if (!addOp2->IsIntegralConst(-1)) { return nullptr; } GenTree* addOp1 = op2->gtGetOp1(); if (!addOp1->OperIs(GT_LCL_VAR) || (addOp1->AsLclVar()->GetLclNum() != op1->AsLclVar()->GetLclNum())) { return nullptr; } NamedIntrinsic intrinsic; if (op1->TypeIs(TYP_LONG) && comp->compOpportunisticallyDependsOn(InstructionSet_BMI1_X64)) { intrinsic = NamedIntrinsic::NI_BMI1_X64_ResetLowestSetBit; } else if (comp->compOpportunisticallyDependsOn(InstructionSet_BMI1)) { intrinsic = NamedIntrinsic::NI_BMI1_ResetLowestSetBit; } else { return nullptr; } LIR::Use use; if (!BlockRange().TryGetUse(andNode, &use)) { return nullptr; } GenTreeHWIntrinsic* blsrNode = comp->gtNewScalarHWIntrinsicNode(andNode->TypeGet(), op1, intrinsic); JITDUMP("Lower: optimize AND(X, ADD(X, -1))\n"); DISPNODE(andNode); JITDUMP("to:\n"); 
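    // For reference: x & (x - 1) clears the lowest set bit of x (e.g. 0b10110 & 0b10101 == 0b10100),
    // which is exactly what the blsr instruction computes in a single operation.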
DISPNODE(blsrNode); use.ReplaceWith(blsrNode); BlockRange().InsertBefore(andNode, blsrNode); BlockRange().Remove(andNode); BlockRange().Remove(op2); BlockRange().Remove(addOp1); BlockRange().Remove(addOp2); ContainCheckHWIntrinsic(blsrNode); return blsrNode; } //---------------------------------------------------------------------------------------------- // Lowering::TryLowerAndOpToExtractLowestSetIsolatedBit: Lowers a tree AND(X, NEG(X)) to // HWIntrinsic::ExtractLowestSetBit // // Arguments: // andNode - GT_AND node of integral type // // Return Value: // Returns the replacement node if one is created else nullptr indicating no replacement // // Notes: // Performs containment checks on the replacement node if one is created GenTree* Lowering::TryLowerAndOpToExtractLowestSetBit(GenTreeOp* andNode) { GenTree* opNode = nullptr; GenTree* negNode = nullptr; if (andNode->gtGetOp1()->OperIs(GT_NEG)) { negNode = andNode->gtGetOp1(); opNode = andNode->gtGetOp2(); } else if (andNode->gtGetOp2()->OperIs(GT_NEG)) { negNode = andNode->gtGetOp2(); opNode = andNode->gtGetOp1(); } if (opNode == nullptr) { return nullptr; } GenTree* negOp = negNode->AsUnOp()->gtGetOp1(); if (!negOp->OperIs(GT_LCL_VAR) || !opNode->OperIs(GT_LCL_VAR) || (negOp->AsLclVar()->GetLclNum() != opNode->AsLclVar()->GetLclNum())) { return nullptr; } NamedIntrinsic intrinsic; if (andNode->TypeIs(TYP_LONG) && comp->compOpportunisticallyDependsOn(InstructionSet_BMI1_X64)) { intrinsic = NamedIntrinsic::NI_BMI1_X64_ExtractLowestSetBit; } else if (comp->compOpportunisticallyDependsOn(InstructionSet_BMI1)) { intrinsic = NamedIntrinsic::NI_BMI1_ExtractLowestSetBit; } else { return nullptr; } LIR::Use use; if (!BlockRange().TryGetUse(andNode, &use)) { return nullptr; } GenTreeHWIntrinsic* blsiNode = comp->gtNewScalarHWIntrinsicNode(andNode->TypeGet(), opNode, intrinsic); JITDUMP("Lower: optimize AND(X, NEG(X)))\n"); DISPNODE(andNode); JITDUMP("to:\n"); DISPNODE(blsiNode); use.ReplaceWith(blsiNode); BlockRange().InsertBefore(andNode, blsiNode); BlockRange().Remove(andNode); BlockRange().Remove(negNode); BlockRange().Remove(negOp); ContainCheckHWIntrinsic(blsiNode); return blsiNode; } //---------------------------------------------------------------------------------------------- // Lowering::TryLowerAndOpToAndNot: Lowers a tree AND(X, NOT(Y)) to HWIntrinsic::AndNot // // Arguments: // andNode - GT_AND node of integral type // // Return Value: // Returns the replacement node if one is created else nullptr indicating no replacement // // Notes: // Performs containment checks on the replacement node if one is created GenTree* Lowering::TryLowerAndOpToAndNot(GenTreeOp* andNode) { assert(andNode->OperIs(GT_AND) && varTypeIsIntegral(andNode)); GenTree* opNode = nullptr; GenTree* notNode = nullptr; if (andNode->gtGetOp1()->OperIs(GT_NOT)) { notNode = andNode->gtGetOp1(); opNode = andNode->gtGetOp2(); } else if (andNode->gtGetOp2()->OperIs(GT_NOT)) { notNode = andNode->gtGetOp2(); opNode = andNode->gtGetOp1(); } if (opNode == nullptr) { return nullptr; } // We want to avoid using "andn" when one of the operands is both a source and the destination and is also coming // from memory. 
In this scenario, we will get smaller and likely faster code by using the RMW encoding of `and` if (IsBinOpInRMWStoreInd(andNode)) { return nullptr; } NamedIntrinsic intrinsic; if (andNode->TypeIs(TYP_LONG) && comp->compOpportunisticallyDependsOn(InstructionSet_BMI1_X64)) { intrinsic = NamedIntrinsic::NI_BMI1_X64_AndNot; } else if (comp->compOpportunisticallyDependsOn(InstructionSet_BMI1)) { intrinsic = NamedIntrinsic::NI_BMI1_AndNot; } else { return nullptr; } LIR::Use use; if (!BlockRange().TryGetUse(andNode, &use)) { return nullptr; } // note that parameter order for andn is ~y, x so these are purposefully reversed when creating the node GenTreeHWIntrinsic* andnNode = comp->gtNewScalarHWIntrinsicNode(andNode->TypeGet(), notNode->AsUnOp()->gtGetOp1(), opNode, intrinsic); JITDUMP("Lower: optimize AND(X, NOT(Y)))\n"); DISPNODE(andNode); JITDUMP("to:\n"); DISPNODE(andnNode); use.ReplaceWith(andnNode); BlockRange().InsertBefore(andNode, andnNode); BlockRange().Remove(andNode); BlockRange().Remove(notNode); ContainCheckHWIntrinsic(andnNode); return andnNode; } #endif // FEATURE_HW_INTRINSICS //---------------------------------------------------------------------------------------------- // Lowering::IsRMWIndirCandidate: // Returns true if the given operand is a candidate indirection for a read-modify-write // operator. // // Arguments: // operand - The operand to consider. // storeInd - The indirect store that roots the possible RMW operator. // bool Lowering::IsRMWIndirCandidate(GenTree* operand, GenTree* storeInd) { // If the operand isn't an indirection, it's trivially not a candidate. if (operand->OperGet() != GT_IND) { return false; } // If the indirection's source address isn't equivalent to the destination address of the storeIndir, then the // indirection is not a candidate. GenTree* srcAddr = operand->gtGetOp1(); GenTree* dstAddr = storeInd->gtGetOp1(); if ((srcAddr->OperGet() != dstAddr->OperGet()) || !IndirsAreEquivalent(operand, storeInd)) { return false; } // If it is not safe to contain the entire tree rooted at the indirection, then the indirection is not a // candidate. Crawl the IR from the node immediately preceding the storeIndir until the last node in the // indirection's tree is visited and check the side effects at each point. m_scratchSideEffects.Clear(); assert((operand->gtLIRFlags & LIR::Flags::Mark) == 0); operand->gtLIRFlags |= LIR::Flags::Mark; unsigned markCount = 1; GenTree* node; for (node = storeInd->gtPrev; markCount > 0; node = node->gtPrev) { assert(node != nullptr); if ((node->gtLIRFlags & LIR::Flags::Mark) == 0) { m_scratchSideEffects.AddNode(comp, node); } else { node->gtLIRFlags &= ~LIR::Flags::Mark; markCount--; if (m_scratchSideEffects.InterferesWith(comp, node, false)) { // The indirection's tree contains some node that can't be moved to the storeInder. The indirection is // not a candidate. Clear any leftover mark bits and return. 
for (; markCount > 0; node = node->gtPrev) { if ((node->gtLIRFlags & LIR::Flags::Mark) != 0) { node->gtLIRFlags &= ~LIR::Flags::Mark; markCount--; } } return false; } node->VisitOperands([&markCount](GenTree* nodeOperand) -> GenTree::VisitResult { assert((nodeOperand->gtLIRFlags & LIR::Flags::Mark) == 0); nodeOperand->gtLIRFlags |= LIR::Flags::Mark; markCount++; return GenTree::VisitResult::Continue; }); } } // At this point we've verified that the operand is an indirection, its address is equivalent to the storeIndir's // destination address, and that it and the transitive closure of its operand can be safely contained by the // storeIndir. This indirection is therefore a candidate for an RMW op. return true; } //---------------------------------------------------------------------------------------------- // Returns true if this tree is bin-op of a GT_STOREIND of the following form // storeInd(subTreeA, binOp(gtInd(subTreeA), subtreeB)) or // storeInd(subTreeA, binOp(subtreeB, gtInd(subTreeA)) in case of commutative bin-ops // // The above form for storeInd represents a read-modify-write memory binary operation. // // Parameters // tree - GentreePtr of binOp // // Return Value // True if 'tree' is part of a RMW memory operation pattern // bool Lowering::IsBinOpInRMWStoreInd(GenTree* tree) { // Must be a non floating-point type binary operator since SSE2 doesn't support RMW memory ops assert(!varTypeIsFloating(tree)); assert(GenTree::OperIsBinary(tree->OperGet())); // Cheap bail out check before more expensive checks are performed. // RMW memory op pattern requires that one of the operands of binOp to be GT_IND. if (tree->gtGetOp1()->OperGet() != GT_IND && tree->gtGetOp2()->OperGet() != GT_IND) { return false; } LIR::Use use; if (!BlockRange().TryGetUse(tree, &use) || use.User()->OperGet() != GT_STOREIND || use.User()->gtGetOp2() != tree) { return false; } // Since it is not relatively cheap to recognize RMW memory op pattern, we // cache the result in GT_STOREIND node so that while lowering GT_STOREIND // we can use the result. GenTree* indirCandidate = nullptr; GenTree* indirOpSource = nullptr; return IsRMWMemOpRootedAtStoreInd(use.User(), &indirCandidate, &indirOpSource); } //---------------------------------------------------------------------------------------------- // This method recognizes the case where we have a treeNode with the following structure: // storeInd(IndirDst, binOp(gtInd(IndirDst), indirOpSource)) OR // storeInd(IndirDst, binOp(indirOpSource, gtInd(IndirDst)) in case of commutative operations OR // storeInd(IndirDst, unaryOp(gtInd(IndirDst)) in case of unary operations // // Terminology: // indirDst = memory write of an addr mode (i.e. storeind destination) // indirSrc = value being written to memory (i.e. storeind source which could either be a binary or unary op) // indirCandidate = memory read i.e. a gtInd of an addr mode // indirOpSource = source operand used in binary/unary op (i.e. source operand of indirSrc node) // // In x86/x64 this storeInd pattern can be effectively encoded in a single instruction of the // following form in case of integer operations: // binOp [addressing mode], RegIndirOpSource // binOp [addressing mode], immediateVal // where RegIndirOpSource is the register where indirOpSource was computed. // // Right now, we recognize few cases: // a) The gtInd child is a lea/lclVar/lclVarAddr/clsVarAddr/constant // b) BinOp is either add, sub, xor, or, and, shl, rsh, rsz. 
// c) unaryOp is either not/neg // // Implementation Note: The following routines need to be in sync for RMW memory op optimization // to be correct and functional. // IndirsAreEquivalent() // NodesAreEquivalentLeaves() // Codegen of GT_STOREIND and genCodeForShiftRMW() // emitInsRMW() // // TODO-CQ: Enable support for more complex indirections (if needed) or use the value numbering // package to perform more complex tree recognition. // // TODO-XArch-CQ: Add support for RMW of lcl fields (e.g. lclfield binop= source) // // Parameters: // tree - GT_STOREIND node // outIndirCandidate - out param set to indirCandidate as described above // ouutIndirOpSource - out param set to indirOpSource as described above // // Return value // True if there is a RMW memory operation rooted at a GT_STOREIND tree // and out params indirCandidate and indirOpSource are set to non-null values. // Otherwise, returns false with indirCandidate and indirOpSource set to null. // Also updates flags of GT_STOREIND tree with its RMW status. // bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTree* tree, GenTree** outIndirCandidate, GenTree** outIndirOpSource) { assert(!varTypeIsFloating(tree)); assert(outIndirCandidate != nullptr); assert(outIndirOpSource != nullptr); *outIndirCandidate = nullptr; *outIndirOpSource = nullptr; // Early out if storeInd is already known to be a non-RMW memory op GenTreeStoreInd* storeInd = tree->AsStoreInd(); if (storeInd->IsNonRMWMemoryOp()) { return false; } GenTree* indirDst = storeInd->gtGetOp1(); GenTree* indirSrc = storeInd->gtGetOp2(); genTreeOps oper = indirSrc->OperGet(); // Early out if it is already known to be a RMW memory op if (storeInd->IsRMWMemoryOp()) { if (GenTree::OperIsBinary(oper)) { if (storeInd->IsRMWDstOp1()) { *outIndirCandidate = indirSrc->gtGetOp1(); *outIndirOpSource = indirSrc->gtGetOp2(); } else { assert(storeInd->IsRMWDstOp2()); *outIndirCandidate = indirSrc->gtGetOp2(); *outIndirOpSource = indirSrc->gtGetOp1(); } assert(IndirsAreEquivalent(*outIndirCandidate, storeInd)); } else { assert(GenTree::OperIsUnary(oper)); assert(IndirsAreEquivalent(indirSrc->gtGetOp1(), storeInd)); *outIndirCandidate = indirSrc->gtGetOp1(); *outIndirOpSource = indirSrc->gtGetOp1(); } return true; } // If reached here means that we do not know RMW status of tree rooted at storeInd assert(storeInd->IsRMWStatusUnknown()); // Early out if indirDst is not one of the supported memory operands. if (!indirDst->OperIs(GT_LEA, GT_LCL_VAR, GT_LCL_VAR_ADDR, GT_CLS_VAR_ADDR, GT_CNS_INT)) { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_ADDR); return false; } // We can not use Read-Modify-Write instruction forms with overflow checking instructions // because we are not allowed to modify the target until after the overflow check. if (indirSrc->gtOverflowEx()) { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_OPER); return false; } // At this point we can match one of two patterns: // // t_ind = indir t_addr_0 // ... // t_value = binop t_ind, t_other // ... // storeIndir t_addr_1, t_value // // or // // t_ind = indir t_addr_0 // ... // t_value = unop t_ind // ... // storeIndir t_addr_1, t_value // // In all cases, we will eventually make the binop that produces t_value and the entire dataflow tree rooted at // t_ind contained by t_value. GenTree* indirCandidate = nullptr; GenTree* indirOpSource = nullptr; RMWStatus status = STOREIND_RMW_STATUS_UNKNOWN; if (GenTree::OperIsBinary(oper)) { // Return if binary op is not one of the supported operations for RMW of memory. 
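        // (These are the add/sub/and/or/xor and shift/rotate operators listed in the header comment
        // above; not/neg are recognized separately in the unary path below.)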
if (!GenTree::OperIsRMWMemOp(oper)) { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_OPER); return false; } if (GenTree::OperIsShiftOrRotate(oper) && varTypeIsSmall(storeInd)) { // In ldind, Integer values smaller than 4 bytes, a boolean, or a character converted to 4 bytes // by sign or zero-extension as appropriate. If we directly shift the short type data using sar, we // will lose the sign or zero-extension bits. storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_TYPE); return false; } // In the common case, the second operand to the binop will be the indir candidate. GenTreeOp* binOp = indirSrc->AsOp(); if (GenTree::OperIsCommutative(oper) && IsRMWIndirCandidate(binOp->gtOp2, storeInd)) { indirCandidate = binOp->gtOp2; indirOpSource = binOp->gtOp1; status = STOREIND_RMW_DST_IS_OP2; } else if (IsRMWIndirCandidate(binOp->gtOp1, storeInd)) { indirCandidate = binOp->gtOp1; indirOpSource = binOp->gtOp2; status = STOREIND_RMW_DST_IS_OP1; } else { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_ADDR); return false; } } else if (GenTree::OperIsUnary(oper)) { // Nodes other than GT_NOT and GT_NEG are not yet supported. if (oper != GT_NOT && oper != GT_NEG) { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_OPER); return false; } if (indirSrc->gtGetOp1()->OperGet() != GT_IND) { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_ADDR); return false; } GenTreeUnOp* unOp = indirSrc->AsUnOp(); if (IsRMWIndirCandidate(unOp->gtOp1, storeInd)) { // src and dest are the same in case of unary ops indirCandidate = unOp->gtOp1; indirOpSource = unOp->gtOp1; status = STOREIND_RMW_DST_IS_OP1; } else { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_ADDR); return false; } } else { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_OPER); return false; } // By this point we've verified that we have a supported operand with a supported address. Now we need to ensure // that we're able to move the destination address for the source indirection forwards. if (!IsSafeToContainMem(storeInd, indirDst)) { storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_ADDR); return false; } assert(indirCandidate != nullptr); assert(indirOpSource != nullptr); assert(status != STOREIND_RMW_STATUS_UNKNOWN); *outIndirCandidate = indirCandidate; *outIndirOpSource = indirOpSource; storeInd->SetRMWStatus(status); return true; } // anything is in range for AMD64 bool Lowering::IsCallTargetInRange(void* addr) { return true; } // return true if the immediate can be folded into an instruction, for example small enough and non-relocatable bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) const { if (!childNode->IsIntCnsFitsInI32()) { return false; } // At this point we know that it is an int const fits within 4-bytes and hence can safely cast to IntConCommon. // Icons that need relocation should never be marked as contained immed if (childNode->AsIntConCommon()->ImmedValNeedsReloc(comp)) { return false; } return true; } //----------------------------------------------------------------------- // PreferredRegOptionalOperand: returns one of the operands of given // binary oper that is to be preferred for marking as reg optional. // // Since only one of op1 or op2 can be a memory operand on xarch, only // one of them have to be marked as reg optional. Since Lower doesn't // know apriori which of op1 or op2 is not likely to get a register, it // has to make a guess. This routine encapsulates heuristics that // guess whether it is likely to be beneficial to mark op1 or op2 as // reg optional. 
// // // Arguments: // tree - a binary-op tree node that is either commutative // or a compare oper. // // Returns: // Returns op1 or op2 of tree node that is preferred for // marking as reg optional. // // Note: if the tree oper is neither commutative nor a compare oper // then only op2 can be reg optional on xarch and hence no need to // call this routine. GenTree* Lowering::PreferredRegOptionalOperand(GenTree* tree) { assert(GenTree::OperIsBinary(tree->OperGet())); assert(tree->OperIsCommutative() || tree->OperIsCompare() || tree->OperIs(GT_CMP)); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); assert(!op1->IsRegOptional() && !op2->IsRegOptional()); // We default to op1, as op2 is likely to have the shorter lifetime. GenTree* preferredOp = op1; // This routine uses the following heuristics: // // a) If both are register candidates, marking the one with lower weighted // ref count as reg-optional would likely be beneficial as it has // higher probability of not getting a register. Note that we use !lvDoNotEnregister // here because this is being done while we are adding lclVars for Lowering. // // b) op1 = tracked local and op2 = untracked local: LSRA creates two // ref positions for op2: a def and use position. op2's def position // requires a reg and it is allocated a reg by spilling another // interval (if required) and that could be even op1. For this reason // it is beneficial to mark op1 as reg optional. // // TODO: It is not always mandatory for a def position of an untracked // local to be allocated a register if it is on rhs of an assignment // and its use position is reg-optional and has not been assigned a // register. Reg optional def positions is currently not yet supported. // // c) op1 = untracked local and op2 = tracked local: marking op1 as // reg optional is beneficial, since its use position is less likely // to get a register. // // d) If both are untracked locals (i.e. treated like tree temps by // LSRA): though either of them could be marked as reg optional, // marking op1 as reg optional is likely to be beneficial because // while allocating op2's def position, there is a possibility of // spilling op1's def and in which case op1 is treated as contained // memory operand rather than requiring to reload. // // e) If only one of them is a local var, prefer to mark it as // reg-optional. This is heuristic is based on the results // obtained against CQ perf benchmarks. // // f) If neither of them are local vars (i.e. tree temps), prefer to // mark op1 as reg optional for the same reason as mentioned in (d) above. if (op1->OperGet() == GT_LCL_VAR && op2->OperGet() == GT_LCL_VAR) { LclVarDsc* v1 = comp->lvaGetDesc(op1->AsLclVarCommon()); LclVarDsc* v2 = comp->lvaGetDesc(op2->AsLclVarCommon()); bool v1IsRegCandidate = !v1->lvDoNotEnregister; bool v2IsRegCandidate = !v2->lvDoNotEnregister; if (v1IsRegCandidate && v2IsRegCandidate) { // Both are enregisterable locals. The one with lower weight is less likely // to get a register and hence beneficial to mark the one with lower // weight as reg optional. // If either is not tracked, it may be that it was introduced after liveness // was run, in which case we will always prefer op1 (should we use raw refcnt??). 
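            // lvRefCntWtd() is (roughly) the block-weighted reference count; the lower-weight local is
            // the one less likely to receive a register, so it is the better reg-optional candidate.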
if (v1->lvTracked && v2->lvTracked && (v1->lvRefCntWtd() >= v2->lvRefCntWtd())) { preferredOp = op2; } } } else if (!(op1->OperGet() == GT_LCL_VAR) && (op2->OperGet() == GT_LCL_VAR)) { preferredOp = op2; } return preferredOp; } //------------------------------------------------------------------------ // Containment analysis //------------------------------------------------------------------------ //------------------------------------------------------------------------ // ContainCheckCallOperands: Determine whether operands of a call should be contained. // // Arguments: // call - The call node of interest // // Return Value: // None. // void Lowering::ContainCheckCallOperands(GenTreeCall* call) { GenTree* ctrlExpr = call->gtControlExpr; if (call->gtCallType == CT_INDIRECT) { // either gtControlExpr != null or gtCallAddr != null. // Both cannot be non-null at the same time. assert(ctrlExpr == nullptr); assert(call->gtCallAddr != nullptr); ctrlExpr = call->gtCallAddr; #ifdef TARGET_X86 // Fast tail calls aren't currently supported on x86, but if they ever are, the code // below that handles indirect VSD calls will need to be fixed. assert(!call->IsFastTailCall() || !call->IsVirtualStub()); #endif // TARGET_X86 } // set reg requirements on call target represented as control sequence. if (ctrlExpr != nullptr) { // we should never see a gtControlExpr whose type is void. assert(ctrlExpr->TypeGet() != TYP_VOID); #ifdef TARGET_X86 // On x86, we need to generate a very specific pattern for indirect VSD calls: // // 3-byte nop // call dword ptr [eax] // // Where EAX is also used as an argument to the stub dispatch helper. Make // sure that the call target address is computed into EAX in this case. if (call->IsVirtualStub() && (call->gtCallType == CT_INDIRECT)) { assert(ctrlExpr->isIndir()); MakeSrcContained(call, ctrlExpr); } else #endif // TARGET_X86 if (ctrlExpr->isIndir()) { // We may have cases where we have set a register target on the ctrlExpr, but if it // contained we must clear it. ctrlExpr->SetRegNum(REG_NA); MakeSrcContained(call, ctrlExpr); } } for (GenTreeCall::Use& use : call->Args()) { if (use.GetNode()->OperIs(GT_PUTARG_STK)) { LowerPutArgStk(use.GetNode()->AsPutArgStk()); } } for (GenTreeCall::Use& use : call->LateArgs()) { if (use.GetNode()->OperIs(GT_PUTARG_STK)) { LowerPutArgStk(use.GetNode()->AsPutArgStk()); } } } //------------------------------------------------------------------------ // ContainCheckIndir: Determine whether operands of an indir should be contained. // // Arguments: // node - The indirection node of interest // // Notes: // This is called for both store and load indirections. In the former case, it is assumed that // LowerStoreIndir() has already been called to check for RMW opportunities. // // Return Value: // None. // void Lowering::ContainCheckIndir(GenTreeIndir* node) { GenTree* addr = node->Addr(); // If this is the rhs of a block copy it will be handled when we handle the store. if (node->TypeGet() == TYP_STRUCT) { return; } #ifdef FEATURE_SIMD // If indirTree is of TYP_SIMD12, don't mark addr as contained // so that it always get computed to a register. This would // mean codegen side logic doesn't need to handle all possible // addr expressions that could be contained. // // TODO-XArch-CQ: handle other addr mode expressions that could be marked // as contained. 
if (node->TypeGet() == TYP_SIMD12) { return; } #endif // FEATURE_SIMD if ((node->gtFlags & GTF_IND_REQ_ADDR_IN_REG) != 0) { // The address of an indirection that requires its address in a reg. // Skip any further processing that might otherwise make it contained. } else if (addr->OperIs(GT_CLS_VAR_ADDR, GT_LCL_VAR_ADDR, GT_LCL_FLD_ADDR)) { // These nodes go into an addr mode: // - GT_CLS_VAR_ADDR turns into a constant. // - GT_LCL_VAR_ADDR, GT_LCL_FLD_ADDR is a stack addr mode. // make this contained, it turns into a constant that goes into an addr mode MakeSrcContained(node, addr); } else if (addr->IsCnsIntOrI() && addr->AsIntConCommon()->FitsInAddrBase(comp)) { // Amd64: // We can mark any pc-relative 32-bit addr as containable. // // On x86, direct VSD is done via a relative branch, and in fact it MUST be contained. MakeSrcContained(node, addr); } else if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(node, addr)) { MakeSrcContained(node, addr); } } //------------------------------------------------------------------------ // ContainCheckStoreIndir: determine whether the sources of a STOREIND node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckStoreIndir(GenTreeStoreInd* node) { // If the source is a containable immediate, make it contained, unless it is // an int-size or larger store of zero to memory, because we can generate smaller code // by zeroing a register and then storing it. GenTree* src = node->Data(); if (IsContainableImmed(node, src) && (!src->IsIntegralConst(0) || varTypeIsSmall(node))) { MakeSrcContained(node, src); } ContainCheckIndir(node); } //------------------------------------------------------------------------ // ContainCheckMul: determine whether the sources of a MUL node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckMul(GenTreeOp* node) { #if defined(TARGET_X86) assert(node->OperIs(GT_MUL, GT_MULHI, GT_MUL_LONG)); #else assert(node->OperIs(GT_MUL, GT_MULHI)); #endif // Case of float/double mul. 
if (varTypeIsFloating(node->TypeGet())) { ContainCheckFloatBinary(node); return; } GenTree* op1 = node->AsOp()->gtOp1; GenTree* op2 = node->AsOp()->gtOp2; bool isSafeToContainOp1 = true; bool isSafeToContainOp2 = true; bool isUnsignedMultiply = ((node->gtFlags & GTF_UNSIGNED) != 0); bool requiresOverflowCheck = node->gtOverflowEx(); bool useLeaEncoding = false; GenTree* memOp = nullptr; bool hasImpliedFirstOperand = false; GenTreeIntConCommon* imm = nullptr; GenTree* other = nullptr; // Multiply should never be using small types assert(!varTypeIsSmall(node->TypeGet())); // We do use the widening multiply to implement // the overflow checking for unsigned multiply // if (isUnsignedMultiply && requiresOverflowCheck) { hasImpliedFirstOperand = true; } else if (node->OperGet() == GT_MULHI) { hasImpliedFirstOperand = true; } #if defined(TARGET_X86) else if (node->OperGet() == GT_MUL_LONG) { hasImpliedFirstOperand = true; } #endif else if (IsContainableImmed(node, op2) || IsContainableImmed(node, op1)) { if (IsContainableImmed(node, op2)) { imm = op2->AsIntConCommon(); other = op1; } else { imm = op1->AsIntConCommon(); other = op2; } // CQ: We want to rewrite this into a LEA ssize_t immVal = imm->AsIntConCommon()->IconValue(); if (!requiresOverflowCheck && (immVal == 3 || immVal == 5 || immVal == 9)) { useLeaEncoding = true; } MakeSrcContained(node, imm); // The imm is always contained if (IsContainableMemoryOp(other)) { memOp = other; // memOp may be contained below } } // We allow one operand to be a contained memory operand. // The memory op type must match with the 'node' type. // This is because during codegen we use 'node' type to derive EmitTypeSize. // E.g op1 type = byte, op2 type = byte but GT_MUL node type is int. // if (memOp == nullptr) { if ((op2->TypeGet() == node->TypeGet()) && IsContainableMemoryOp(op2)) { isSafeToContainOp2 = IsSafeToContainMem(node, op2); if (isSafeToContainOp2) { memOp = op2; } } if ((memOp == nullptr) && (op1->TypeGet() == node->TypeGet()) && IsContainableMemoryOp(op1)) { isSafeToContainOp1 = IsSafeToContainMem(node, op1); if (isSafeToContainOp1) { memOp = op1; } } } else { if ((memOp->TypeGet() != node->TypeGet())) { memOp = nullptr; } else if (!IsSafeToContainMem(node, memOp)) { if (memOp == op1) { isSafeToContainOp1 = false; } else { isSafeToContainOp2 = false; } memOp = nullptr; } } // To generate an LEA we need to force memOp into a register // so don't allow memOp to be 'contained' // if (!useLeaEncoding) { if (memOp != nullptr) { MakeSrcContained(node, memOp); } else { // IsSafeToContainMem is expensive so we call it at most once for each operand // in this method. If we already called IsSafeToContainMem, it must have returned false; // otherwise, memOp would be set to the corresponding operand (op1 or op2). if (imm != nullptr) { // Has a contained immediate operand. // Only 'other' operand can be marked as reg optional. assert(other != nullptr); isSafeToContainOp1 = ((other == op1) && isSafeToContainOp1 && IsSafeToContainMem(node, op1)); isSafeToContainOp2 = ((other == op2) && isSafeToContainOp2 && IsSafeToContainMem(node, op2)); } else if (hasImpliedFirstOperand) { // Only op2 can be marked as reg optional. isSafeToContainOp1 = false; isSafeToContainOp2 = isSafeToContainOp2 && IsSafeToContainMem(node, op2); } else { // If there are no containable operands, we can make either of op1 or op2 // as reg optional. 
isSafeToContainOp1 = isSafeToContainOp1 && IsSafeToContainMem(node, op1); isSafeToContainOp2 = isSafeToContainOp2 && IsSafeToContainMem(node, op2); } SetRegOptionalForBinOp(node, isSafeToContainOp1, isSafeToContainOp2); } } } //------------------------------------------------------------------------ // ContainCheckDivOrMod: determine which operands of a div/mod should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckDivOrMod(GenTreeOp* node) { assert(node->OperIs(GT_DIV, GT_MOD, GT_UDIV, GT_UMOD)); if (varTypeIsFloating(node->TypeGet())) { ContainCheckFloatBinary(node); return; } GenTree* divisor = node->gtGetOp2(); bool divisorCanBeRegOptional = true; #ifdef TARGET_X86 GenTree* dividend = node->gtGetOp1(); if (dividend->OperGet() == GT_LONG) { divisorCanBeRegOptional = false; MakeSrcContained(node, dividend); } #endif // divisor can be an r/m, but the memory indirection must be of the same size as the divide if (IsContainableMemoryOp(divisor) && (divisor->TypeGet() == node->TypeGet()) && IsSafeToContainMem(node, divisor)) { MakeSrcContained(node, divisor); } else if (divisorCanBeRegOptional) { // If there are no containable operands, we can make an operand reg optional. // Div instruction allows only divisor to be a memory op. divisor->SetRegOptional(); } } //------------------------------------------------------------------------ // ContainCheckShiftRotate: determine whether the sources of a shift/rotate node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckShiftRotate(GenTreeOp* node) { assert(node->OperIsShiftOrRotate()); #ifdef TARGET_X86 GenTree* source = node->gtOp1; if (node->OperIsShiftLong()) { assert(source->OperGet() == GT_LONG); MakeSrcContained(node, source); } #endif // !TARGET_X86 GenTree* shiftBy = node->gtOp2; if (IsContainableImmed(node, shiftBy) && (shiftBy->AsIntConCommon()->IconValue() <= 255) && (shiftBy->AsIntConCommon()->IconValue() >= 0)) { MakeSrcContained(node, shiftBy); } } //------------------------------------------------------------------------ // ContainCheckStoreLoc: determine whether the source of a STORE_LCL* should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const { assert(storeLoc->OperIsLocalStore()); GenTree* op1 = storeLoc->gtGetOp1(); if (op1->OperIs(GT_BITCAST)) { // If we know that the source of the bitcast will be in a register, then we can make // the bitcast itself contained. This will allow us to store directly from the other // type if this node doesn't get a register. GenTree* bitCastSrc = op1->gtGetOp1(); if (!bitCastSrc->isContained() && !bitCastSrc->IsRegOptional()) { op1->SetContained(); return; } } const LclVarDsc* varDsc = comp->lvaGetDesc(storeLoc); #ifdef FEATURE_SIMD if (varTypeIsSIMD(storeLoc)) { assert(!op1->IsCnsIntOrI()); if (storeLoc->TypeIs(TYP_SIMD12) && op1->IsSIMDZero() && varDsc->lvDoNotEnregister) { // For a SIMD12 store we can zero from integer registers more easily. MakeSrcContained(storeLoc, op1); GenTree* constNode = op1->gtGetOp1(); assert(constNode->OperIsConst()); constNode->ClearContained(); constNode->gtType = TYP_INT; constNode->SetOper(GT_CNS_INT); } return; } #endif // FEATURE_SIMD // If the source is a containable immediate, make it contained, unless it is // an int-size or larger store of zero to memory, because we can generate smaller code // by zeroing a register and then storing it. 
var_types type = varDsc->GetRegisterType(storeLoc); if (IsContainableImmed(storeLoc, op1) && (!op1->IsIntegralConst(0) || varTypeIsSmall(type))) { MakeSrcContained(storeLoc, op1); } #ifdef TARGET_X86 else if (op1->OperGet() == GT_LONG) { MakeSrcContained(storeLoc, op1); } #endif // TARGET_X86 } //------------------------------------------------------------------------ // ContainCheckCast: determine whether the source of a CAST node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckCast(GenTreeCast* node) { GenTree* castOp = node->CastOp(); var_types castToType = node->CastToType(); var_types srcType = castOp->TypeGet(); // force the srcType to unsigned if GT_UNSIGNED flag is set if (node->gtFlags & GTF_UNSIGNED) { srcType = varTypeToUnsigned(srcType); } if (!node->gtOverflow() && (varTypeIsFloating(castToType) || varTypeIsFloating(srcType))) { #ifdef DEBUG // If converting to float/double, the operand must be 4 or 8 byte in size. if (varTypeIsFloating(castToType)) { unsigned opSize = genTypeSize(srcType); assert(opSize == 4 || opSize == 8); } #endif // DEBUG // U8 -> R8 conversion requires that the operand be in a register. if (srcType != TYP_ULONG) { if ((IsContainableMemoryOp(castOp) && IsSafeToContainMem(node, castOp)) || castOp->IsCnsNonZeroFltOrDbl()) { MakeSrcContained(node, castOp); } else { // Mark castOp as reg optional to indicate codegen // can still generate code if it is on stack. castOp->SetRegOptional(); } } } #if !defined(TARGET_64BIT) if (varTypeIsLong(srcType)) { noway_assert(castOp->OperGet() == GT_LONG); castOp->SetContained(); } #endif // !defined(TARGET_64BIT) } //------------------------------------------------------------------------ // ContainCheckCompare: determine whether the sources of a compare node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckCompare(GenTreeOp* cmp) { assert(cmp->OperIsCompare() || cmp->OperIs(GT_CMP)); GenTree* op1 = cmp->AsOp()->gtOp1; GenTree* op2 = cmp->AsOp()->gtOp2; var_types op1Type = op1->TypeGet(); var_types op2Type = op2->TypeGet(); // If either of op1 or op2 is floating point values, then we need to use // ucomiss or ucomisd to compare, both of which support the following form: // ucomis[s|d] xmm, xmm/mem // That is only the second operand can be a memory op. // // Second operand is a memory Op: Note that depending on comparison operator, // the operands of ucomis[s|d] need to be reversed. Therefore, either op1 or // op2 can be a memory op depending on the comparison operator. if (varTypeIsFloating(op1Type)) { // The type of the operands has to be the same and no implicit conversions at this stage. assert(op1Type == op2Type); GenTree* otherOp; if (GenCondition::FromFloatRelop(cmp).PreferSwap()) { otherOp = op1; } else { otherOp = op2; } assert(otherOp != nullptr); bool isSafeToContainOtherOp = true; if (otherOp->IsCnsNonZeroFltOrDbl()) { MakeSrcContained(cmp, otherOp); } else if (IsContainableMemoryOp(otherOp)) { isSafeToContainOtherOp = IsSafeToContainMem(cmp, otherOp); if (isSafeToContainOtherOp) { MakeSrcContained(cmp, otherOp); } } if (!otherOp->isContained() && isSafeToContainOtherOp && IsSafeToContainMem(cmp, otherOp)) { // SSE2 allows only otherOp to be a memory-op. Since otherOp is not // contained, we can mark it reg-optional. // IsSafeToContainMem is expensive so we call it at most once for otherOp. // If we already called IsSafeToContainMem, it must have returned false; // otherwise, otherOp would be contained. 
otherOp->SetRegOptional(); } return; } // TODO-XArch-CQ: factor out cmp optimization in 'genCondSetFlags' to be used here // or in other backend. if (CheckImmedAndMakeContained(cmp, op2)) { // If the types are the same, or if the constant is of the correct size, // we can treat the MemoryOp as contained. if (op1Type == op2Type) { if (IsContainableMemoryOp(op1) && IsSafeToContainMem(cmp, op1)) { MakeSrcContained(cmp, op1); } else { op1->SetRegOptional(); } } } else if (op1Type == op2Type) { // Note that TEST does not have a r,rm encoding like CMP has but we can still // contain the second operand because the emitter maps both r,rm and rm,r to // the same instruction code. This avoids the need to special case TEST here. bool isSafeToContainOp1 = true; bool isSafeToContainOp2 = true; if (IsContainableMemoryOp(op2)) { isSafeToContainOp2 = IsSafeToContainMem(cmp, op2); if (isSafeToContainOp2) { MakeSrcContained(cmp, op2); } } if (!op2->isContained() && IsContainableMemoryOp(op1)) { isSafeToContainOp1 = IsSafeToContainMem(cmp, op1); if (isSafeToContainOp1) { MakeSrcContained(cmp, op1); } } if (!op1->isContained() && !op2->isContained()) { // One of op1 or op2 could be marked as reg optional // to indicate that codegen can still generate code // if one of them is on stack. GenTree* regOptionalCandidate = op1->IsCnsIntOrI() ? op2 : PreferredRegOptionalOperand(cmp); // IsSafeToContainMem is expensive so we call it at most once for each operand // in this method. If we already called IsSafeToContainMem, it must have returned false; // otherwise, the corresponding operand (op1 or op2) would be contained. bool setRegOptional = (regOptionalCandidate == op1) ? isSafeToContainOp1 && IsSafeToContainMem(cmp, op1) : isSafeToContainOp2 && IsSafeToContainMem(cmp, op2); if (setRegOptional) { regOptionalCandidate->SetRegOptional(); } } } } //------------------------------------------------------------------------ // LowerRMWMemOp: Determine if this is a valid RMW mem op, and if so lower it accordingly // // Arguments: // node - The indirect store node (GT_STORE_IND) of interest // // Return Value: // Returns true if 'node' is a valid RMW mem op; false otherwise. // bool Lowering::LowerRMWMemOp(GenTreeIndir* storeInd) { assert(storeInd->OperGet() == GT_STOREIND); // SSE2 doesn't support RMW on float values assert(!varTypeIsFloating(storeInd)); // Terminology: // indirDst = memory write of an addr mode (i.e. storeind destination) // indirSrc = value being written to memory (i.e. storeind source which could a binary/unary op) // indirCandidate = memory read i.e. a gtInd of an addr mode // indirOpSource = source operand used in binary/unary op (i.e. 
source operand of indirSrc node) GenTree* indirCandidate = nullptr; GenTree* indirOpSource = nullptr; if (!IsRMWMemOpRootedAtStoreInd(storeInd, &indirCandidate, &indirOpSource)) { JITDUMP("Lower of StoreInd didn't mark the node as self contained for reason: %s\n", RMWStatusDescription(storeInd->AsStoreInd()->GetRMWStatus())); DISPTREERANGE(BlockRange(), storeInd); return false; } GenTree* indirDst = storeInd->gtGetOp1(); GenTree* indirSrc = storeInd->gtGetOp2(); genTreeOps oper = indirSrc->OperGet(); // At this point we have successfully detected a RMW memory op of one of the following forms // storeInd(indirDst, indirSrc(indirCandidate, indirOpSource)) OR // storeInd(indirDst, indirSrc(indirOpSource, indirCandidate) in case of commutative operations OR // storeInd(indirDst, indirSrc(indirCandidate) in case of unary operations // // Here indirSrc = one of the supported binary or unary operation for RMW of memory // indirCandidate = a GT_IND node // indirCandidateChild = operand of GT_IND indirCandidate // // The logic below does the following // Make indirOpSource contained. // Make indirSrc contained. // Make indirCandidate contained. // Make indirCandidateChild contained. // Make indirDst contained except when it is a GT_LCL_VAR or GT_CNS_INT that doesn't fit within addr // base. // // We have already done containment analysis on the indirSrc op. // If any of its operands are marked regOptional, reset that now. indirSrc->AsOp()->gtOp1->ClearRegOptional(); if (GenTree::OperIsBinary(oper)) { // On Xarch RMW operations require the source to be an immediate or in a register. // Therefore, if we have previously marked the indirOpSource as contained while lowering // the binary node, we need to reset that now. if (IsContainableMemoryOp(indirOpSource)) { indirOpSource->ClearContained(); } indirSrc->AsOp()->gtOp2->ClearRegOptional(); JITDUMP("Lower succesfully detected an assignment of the form: *addrMode BinOp= source\n"); } else { assert(GenTree::OperIsUnary(oper)); JITDUMP("Lower succesfully detected an assignment of the form: *addrMode = UnaryOp(*addrMode)\n"); } DISPTREERANGE(BlockRange(), storeInd); indirSrc->SetContained(); indirCandidate->SetContained(); GenTree* indirCandidateChild = indirCandidate->gtGetOp1(); indirCandidateChild->SetContained(); if (indirCandidateChild->OperGet() == GT_LEA) { GenTreeAddrMode* addrMode = indirCandidateChild->AsAddrMode(); if (addrMode->HasBase()) { assert(addrMode->Base()->OperIsLeaf()); addrMode->Base()->SetContained(); } if (addrMode->HasIndex()) { assert(addrMode->Index()->OperIsLeaf()); addrMode->Index()->SetContained(); } indirDst->SetContained(); } else { assert(indirCandidateChild->OperIs(GT_LCL_VAR, GT_LCL_VAR_ADDR, GT_CLS_VAR_ADDR, GT_CNS_INT)); // If it is a GT_LCL_VAR, it still needs the reg to hold the address. // We would still need a reg for GT_CNS_INT if it doesn't fit within addressing mode base. // For GT_CLS_VAR_ADDR, we don't need a reg to hold the address, because field address value is known at jit // time. Also, we don't need a reg for GT_CLS_VAR_ADDR. if (indirCandidateChild->OperIs(GT_LCL_VAR_ADDR, GT_CLS_VAR_ADDR)) { indirDst->SetContained(); } else if (indirCandidateChild->IsCnsIntOrI() && indirCandidateChild->AsIntConCommon()->FitsInAddrBase(comp)) { indirDst->SetContained(); } } return true; } //------------------------------------------------------------------------ // ContainCheckBinary: Determine whether a binary op's operands should be contained. 
// // Arguments: // node - the node we care about // void Lowering::ContainCheckBinary(GenTreeOp* node) { assert(node->OperIsBinary()); if (varTypeIsFloating(node)) { assert(node->OperIs(GT_ADD, GT_SUB)); ContainCheckFloatBinary(node); return; } GenTree* op1 = node->gtOp1; GenTree* op2 = node->gtOp2; // We can directly encode the second operand if it is either a containable constant or a memory-op. // In case of memory-op, we can encode it directly provided its type matches with 'tree' type. // This is because during codegen, type of 'tree' is used to determine emit Type size. If the types // do not match, they get normalized (i.e. sign/zero extended) on load into a register. bool directlyEncodable = false; bool binOpInRMW = false; GenTree* operand = nullptr; bool isSafeToContainOp1 = true; bool isSafeToContainOp2 = true; if (IsContainableImmed(node, op2)) { directlyEncodable = true; operand = op2; } else { binOpInRMW = IsBinOpInRMWStoreInd(node); if (!binOpInRMW) { const unsigned operatorSize = genTypeSize(node->TypeGet()); if ((genTypeSize(op2->TypeGet()) == operatorSize) && IsContainableMemoryOp(op2)) { isSafeToContainOp2 = IsSafeToContainMem(node, op2); if (isSafeToContainOp2) { directlyEncodable = true; operand = op2; } } if ((operand == nullptr) && node->OperIsCommutative()) { // If it is safe, we can reverse the order of operands of commutative operations for efficient // codegen if (IsContainableImmed(node, op1)) { directlyEncodable = true; operand = op1; } else if ((genTypeSize(op1->TypeGet()) == operatorSize) && IsContainableMemoryOp(op1)) { isSafeToContainOp1 = IsSafeToContainMem(node, op1); if (isSafeToContainOp1) { directlyEncodable = true; operand = op1; } } } } } if (directlyEncodable) { assert(operand != nullptr); MakeSrcContained(node, operand); } else if (!binOpInRMW) { // If this binary op neither has contained operands, nor is a // Read-Modify-Write (RMW) operation, we can mark its operands // as reg optional. // IsSafeToContainMem is expensive so we call it at most once for each operand // in this method. If we already called IsSafeToContainMem, it must have returned false; // otherwise, directlyEncodable would be true. isSafeToContainOp1 = isSafeToContainOp1 && IsSafeToContainMem(node, op1); isSafeToContainOp2 = isSafeToContainOp2 && IsSafeToContainMem(node, op2); SetRegOptionalForBinOp(node, isSafeToContainOp1, isSafeToContainOp2); } } //------------------------------------------------------------------------ // ContainCheckBoundsChk: determine whether any source of a bounds check node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckBoundsChk(GenTreeBoundsChk* node) { assert(node->OperIs(GT_BOUNDS_CHECK)); GenTree* other; if (CheckImmedAndMakeContained(node, node->GetIndex())) { other = node->GetArrayLength(); } else if (CheckImmedAndMakeContained(node, node->GetArrayLength())) { other = node->GetIndex(); } else if (IsContainableMemoryOp(node->GetIndex())) { other = node->GetIndex(); } else { other = node->GetArrayLength(); } if (node->GetIndex()->TypeGet() == node->GetArrayLength()->TypeGet()) { if (IsContainableMemoryOp(other) && IsSafeToContainMem(node, other)) { MakeSrcContained(node, other); } else { // We can mark 'other' as reg optional, since it is not contained. other->SetRegOptional(); } } } //------------------------------------------------------------------------ // ContainCheckIntrinsic: determine whether the source of an INTRINSIC node should be contained. 
// // Arguments: // node - pointer to the node // void Lowering::ContainCheckIntrinsic(GenTreeOp* node) { assert(node->OperIs(GT_INTRINSIC)); NamedIntrinsic intrinsicName = node->AsIntrinsic()->gtIntrinsicName; if ((intrinsicName == NI_System_Math_Ceiling) || (intrinsicName == NI_System_Math_Floor) || (intrinsicName == NI_System_Math_Truncate) || (intrinsicName == NI_System_Math_Round) || (intrinsicName == NI_System_Math_Sqrt)) { GenTree* op1 = node->gtGetOp1(); if ((IsContainableMemoryOp(op1) && IsSafeToContainMem(node, op1)) || op1->IsCnsNonZeroFltOrDbl()) { MakeSrcContained(node, op1); } else { // Mark the operand as reg optional since codegen can still // generate code if op1 is on stack. op1->SetRegOptional(); } } } #ifdef FEATURE_SIMD //---------------------------------------------------------------------------------------------- // ContainCheckSIMD: Perform containment analysis for a SIMD intrinsic node. // // Arguments: // simdNode - The SIMD intrinsic node. // void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode) { switch (simdNode->GetSIMDIntrinsicId()) { case SIMDIntrinsicInit: { GenTree* op1 = simdNode->Op(1); #ifndef TARGET_64BIT if (op1->OperGet() == GT_LONG) { MakeSrcContained(simdNode, op1); GenTree* op1lo = op1->gtGetOp1(); GenTree* op1hi = op1->gtGetOp2(); if ((op1lo->IsIntegralConst(0) && op1hi->IsIntegralConst(0)) || (op1lo->IsIntegralConst(-1) && op1hi->IsIntegralConst(-1))) { MakeSrcContained(op1, op1lo); MakeSrcContained(op1, op1hi); } } else #endif // !TARGET_64BIT if (op1->IsFPZero() || op1->IsIntegralConst(0) || (varTypeIsIntegral(simdNode->GetSimdBaseType()) && op1->IsIntegralConst(-1))) { MakeSrcContained(simdNode, op1); } else if ((comp->getSIMDSupportLevel() == SIMD_AVX2_Supported) && ((simdNode->GetSimdSize() == 16) || (simdNode->GetSimdSize() == 32))) { // Either op1 is a float or dbl constant or an addr if (op1->IsCnsFltOrDbl() || op1->OperIsLocalAddr()) { MakeSrcContained(simdNode, op1); } } } break; case SIMDIntrinsicInitArray: // We have an array and an index, which may be contained. CheckImmedAndMakeContained(simdNode, simdNode->Op(2)); break; case SIMDIntrinsicShuffleSSE2: // Second operand is an integer constant and marked as contained. assert(simdNode->Op(2)->IsCnsIntOrI()); MakeSrcContained(simdNode, simdNode->Op(2)); break; default: break; } } #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS //---------------------------------------------------------------------------------------------- // TryGetContainableHWIntrinsicOp: Tries to get a containable node for a given HWIntrinsic // // Arguments: // [In] containingNode - The hardware intrinsic node which contains 'node' // [In/Out] pNode - The node to check and potentially replace with the containable node // [Out] supportsRegOptional - On return, this will be true if 'containingNode' supports regOptional operands // otherwise, false. // [In] transparentParentNode - optional "transparent" intrinsic parent like CreateScalarUnsafe // // Return Value: // true if 'node' is a containable by containingNode; otherwise, false. // // When true is returned 'node' (and by extension the relevant op of 'containingNode') may be modified // to handle special scenarios such as CreateScalarUnsafe which exist to bridge the type system with // the actual registers. // // When false is returned 'node' is not modified. 
// bool Lowering::TryGetContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode, GenTree** pNode, bool* supportsRegOptional, GenTreeHWIntrinsic* transparentParentNode) { assert(containingNode != nullptr); assert((pNode != nullptr) && (*pNode != nullptr)); assert(supportsRegOptional != nullptr); NamedIntrinsic containingIntrinsicId = containingNode->GetHWIntrinsicId(); HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(containingIntrinsicId); GenTree*& node = *pNode; // We shouldn't have called in here if containingNode doesn't support containment assert(HWIntrinsicInfo::SupportsContainment(containingIntrinsicId)); // containingNode supports nodes that read from an aligned memory address // // This will generally be an explicit LoadAligned instruction and is false for // machines with VEX support when minOpts is enabled. This is because there is // currently no way to guarantee that the address read from will always be // aligned and we want to assert that the address is aligned when optimizations // aren't enabled. However, when optimizations are enabled, we want to allow // folding of memory operands as it produces better codegen and allows simpler // coding patterns on the managed side. bool supportsAlignedSIMDLoads = false; // containingNode supports nodes that read from general memory // // We currently have to assume all "general" loads are unaligned. As such, this is // generally used to determine if we can mark the node as `regOptional` in the case // where `node` is not containable. However, this can also be used to determine whether // we can mark other types of reads as contained (such as when directly reading a local). bool supportsGeneralLoads = false; // containingNode supports nodes that read from a scalar memory address // // This will generally be an explicit LoadScalar instruction but is also used to determine // whether we can read an address of type T (we don't support this when the load would // read more than sizeof(T) bytes). bool supportsSIMDScalarLoads = false; // containingNode supports nodes that read from an unaligned memory address // // This will generally be an explicit Load instruction and is generally false for machines // without VEX support. This is because older hardware required that the SIMD operand always // be aligned to the 'natural alignment' of the type. bool supportsUnalignedSIMDLoads = false; switch (category) { case HW_Category_MemoryLoad: { supportsGeneralLoads = !node->OperIsHWIntrinsic(); break; } case HW_Category_SimpleSIMD: { switch (containingIntrinsicId) { case NI_SSE41_ConvertToVector128Int16: case NI_SSE41_ConvertToVector128Int32: case NI_SSE41_ConvertToVector128Int64: case NI_AVX2_ConvertToVector256Int16: case NI_AVX2_ConvertToVector256Int32: case NI_AVX2_ConvertToVector256Int64: { assert(!supportsSIMDScalarLoads); if (!containingNode->OperIsMemoryLoad()) { // The containable form is the one that takes a SIMD value, that may be in memory. 
if (!comp->canUseVexEncoding()) { supportsAlignedSIMDLoads = true; supportsUnalignedSIMDLoads = !supportsAlignedSIMDLoads; } else { supportsAlignedSIMDLoads = !comp->opts.MinOpts(); supportsUnalignedSIMDLoads = true; } // General loads are a bit special where we need at least `sizeof(simdType) / (sizeof(baseType) // * 2)` elements // For example: // * ConvertToVector128Int16 - sizeof(simdType) = 16; sizeof(baseType) = 1; expectedSize = 8 // * ConvertToVector128Int32 - sizeof(simdType) = 16; sizeof(baseType) = 1 | 2; // expectedSize = 8 | 4 // * ConvertToVector128Int64 - sizeof(simdType) = 16; sizeof(baseType) = 1 | 2 | 4; // expectedSize = 8 | 4 | 2 // * ConvertToVector256Int16 - sizeof(simdType) = 32; sizeof(baseType) = 1; expectedSize = 16 // * ConvertToVector256Int32 - sizeof(simdType) = 32; sizeof(baseType) = 1 | 2; // expectedSize = 16 | 8 // * ConvertToVector256Int64 - sizeof(simdType) = 32; sizeof(baseType) = 1 | 2 | 4; // expectedSize = 16 | 8 | 4 const unsigned sizeof_simdType = genTypeSize(containingNode->TypeGet()); const unsigned sizeof_baseType = genTypeSize(containingNode->GetSimdBaseType()); assert((sizeof_simdType == 16) || (sizeof_simdType == 32)); assert((sizeof_baseType == 1) || (sizeof_baseType == 2) || (sizeof_baseType == 4)); const unsigned expectedSize = sizeof_simdType / (sizeof_baseType * 2); const unsigned operandSize = genTypeSize(node->TypeGet()); assert((sizeof_simdType != 16) || (expectedSize == 8) || (expectedSize == 4) || (expectedSize == 2)); assert((sizeof_simdType != 32) || (expectedSize == 16) || (expectedSize == 8) || (expectedSize == 4)); supportsGeneralLoads = (operandSize >= expectedSize); } else { // The memory form of this already takes a pointer and should be treated like a MemoryLoad supportsGeneralLoads = !node->OperIsHWIntrinsic(); } break; } case NI_SSE2_ConvertToVector128Double: case NI_SSE3_MoveAndDuplicate: case NI_AVX_ConvertToVector256Double: { assert(!supportsSIMDScalarLoads); // Most instructions under the non-VEX encoding require aligned operands. 
// Those used for Sse2.ConvertToVector128Double (CVTDQ2PD and CVTPS2PD) // and Sse3.MoveAndDuplicate (MOVDDUP) are exceptions and don't fail for // unaligned inputs as they read mem64 (half the vector width) instead supportsAlignedSIMDLoads = !comp->opts.MinOpts(); supportsUnalignedSIMDLoads = true; const unsigned expectedSize = genTypeSize(containingNode->TypeGet()) / 2; const unsigned operandSize = genTypeSize(node->TypeGet()); supportsGeneralLoads = supportsUnalignedSIMDLoads && (operandSize >= expectedSize); break; } default: { assert(!supportsSIMDScalarLoads); if (!comp->canUseVexEncoding()) { assert(!supportsUnalignedSIMDLoads); supportsAlignedSIMDLoads = true; } else { supportsAlignedSIMDLoads = !comp->opts.MinOpts(); supportsUnalignedSIMDLoads = true; } const unsigned expectedSize = genTypeSize(containingNode->TypeGet()); const unsigned operandSize = genTypeSize(node->TypeGet()); supportsGeneralLoads = supportsUnalignedSIMDLoads && (operandSize >= expectedSize); break; } } assert(supportsSIMDScalarLoads == false); break; } case HW_Category_IMM: { switch (containingIntrinsicId) { case NI_SSE_Shuffle: case NI_SSE2_ShiftLeftLogical: case NI_SSE2_ShiftRightArithmetic: case NI_SSE2_ShiftRightLogical: case NI_SSE2_Shuffle: case NI_SSE2_ShuffleHigh: case NI_SSE2_ShuffleLow: case NI_SSSE3_AlignRight: case NI_SSE41_Blend: case NI_SSE41_DotProduct: case NI_SSE41_MultipleSumAbsoluteDifferences: case NI_AES_KeygenAssist: case NI_PCLMULQDQ_CarrylessMultiply: case NI_AVX_Blend: case NI_AVX_Compare: case NI_AVX_DotProduct: case NI_AVX_Permute: case NI_AVX_Permute2x128: case NI_AVX2_Blend: case NI_AVX2_MultipleSumAbsoluteDifferences: case NI_AVX2_Permute2x128: case NI_AVX2_Permute4x64: case NI_AVX2_ShiftLeftLogical: case NI_AVX2_ShiftRightArithmetic: case NI_AVX2_ShiftRightLogical: case NI_AVX2_ShuffleHigh: case NI_AVX2_ShuffleLow: { assert(!supportsSIMDScalarLoads); const unsigned expectedSize = genTypeSize(containingNode->GetSimdBaseType()); const unsigned operandSize = genTypeSize(node->TypeGet()); supportsAlignedSIMDLoads = !comp->canUseVexEncoding() || !comp->opts.MinOpts(); supportsUnalignedSIMDLoads = comp->canUseVexEncoding(); supportsGeneralLoads = supportsUnalignedSIMDLoads && (operandSize >= expectedSize); break; } case NI_AVX_InsertVector128: case NI_AVX2_InsertVector128: { // InsertVector128 is special in that that it returns a TYP_SIMD32 but takes a TYP_SIMD16 assert(!supportsSIMDScalarLoads); const unsigned expectedSize = 16; const unsigned operandSize = genTypeSize(node->TypeGet()); supportsAlignedSIMDLoads = !comp->canUseVexEncoding() || !comp->opts.MinOpts(); supportsUnalignedSIMDLoads = comp->canUseVexEncoding(); supportsGeneralLoads = supportsUnalignedSIMDLoads && (operandSize >= expectedSize); break; } case NI_SSE2_Insert: case NI_SSE41_Insert: case NI_SSE41_X64_Insert: { assert(supportsAlignedSIMDLoads == false); assert(supportsUnalignedSIMDLoads == false); if (containingNode->GetSimdBaseType() == TYP_FLOAT) { assert(containingIntrinsicId == NI_SSE41_Insert); // Sse41.Insert(V128<float>, V128<float>, byte) is a bit special // in that it has different behavior depending on whether the // second operand is coming from a register or memory. When coming // from a register, all 4 elements of the vector can be used and it // is effectively a regular `SimpleSIMD` operation; but when loading // from memory, it only works with the lowest element and is effectively // a `SIMDScalar`. 
assert(supportsGeneralLoads == false); assert(supportsSIMDScalarLoads == false); GenTree* op1 = containingNode->Op(1); GenTree* op2 = containingNode->Op(2); GenTree* op3 = containingNode->Op(3); // The upper two bits of the immediate value are ignored if // op2 comes from memory. In order to support using the upper // bits, we need to disable containment support if op3 is not // constant or if the constant is greater than 0x3F (which means // at least one of the upper two bits is set). if (op3->IsCnsIntOrI()) { ssize_t ival = op3->AsIntCon()->IconValue(); assert((ival >= 0) && (ival <= 255)); supportsSIMDScalarLoads = (ival <= 0x3F); supportsGeneralLoads = supportsSIMDScalarLoads; } break; } // We should only get here for integral nodes. assert(varTypeIsIntegral(node->TypeGet())); assert(supportsSIMDScalarLoads == false); const unsigned expectedSize = genTypeSize(containingNode->GetSimdBaseType()); const unsigned operandSize = genTypeSize(node->TypeGet()); supportsGeneralLoads = (operandSize >= expectedSize); break; } case NI_AVX_CompareScalar: { assert(supportsAlignedSIMDLoads == false); assert(supportsUnalignedSIMDLoads == false); supportsSIMDScalarLoads = true; supportsGeneralLoads = supportsSIMDScalarLoads; break; } default: { assert(supportsAlignedSIMDLoads == false); assert(supportsGeneralLoads == false); assert(supportsSIMDScalarLoads == false); assert(supportsUnalignedSIMDLoads == false); break; } } break; } case HW_Category_SIMDScalar: { assert(supportsAlignedSIMDLoads == false); assert(supportsUnalignedSIMDLoads == false); switch (containingIntrinsicId) { case NI_Vector128_CreateScalarUnsafe: case NI_Vector256_CreateScalarUnsafe: { if (!varTypeIsIntegral(node->TypeGet())) { // The floating-point overload doesn't require any special semantics supportsSIMDScalarLoads = true; supportsGeneralLoads = supportsSIMDScalarLoads; break; } // The integral overloads only take GPR/mem assert(supportsSIMDScalarLoads == false); const unsigned expectedSize = genTypeSize(genActualType(containingNode->GetSimdBaseType())); const unsigned operandSize = genTypeSize(node->TypeGet()); supportsGeneralLoads = (operandSize >= expectedSize); break; } case NI_AVX2_BroadcastScalarToVector128: case NI_AVX2_BroadcastScalarToVector256: { if (!containingNode->OperIsMemoryLoad()) { // The containable form is the one that takes a SIMD value, that may be in memory. 
supportsSIMDScalarLoads = true; supportsGeneralLoads = supportsSIMDScalarLoads; } else { // The memory form of this already takes a pointer and should be treated like a MemoryLoad supportsGeneralLoads = !node->OperIsHWIntrinsic(); } break; } case NI_SSE_ConvertScalarToVector128Single: case NI_SSE2_ConvertScalarToVector128Double: case NI_SSE2_ConvertScalarToVector128Int32: case NI_SSE2_ConvertScalarToVector128UInt32: case NI_SSE_X64_ConvertScalarToVector128Single: case NI_SSE2_X64_ConvertScalarToVector128Double: case NI_SSE2_X64_ConvertScalarToVector128Int64: case NI_SSE2_X64_ConvertScalarToVector128UInt64: { if (!varTypeIsIntegral(node->TypeGet())) { // The floating-point overload doesn't require any special semantics assert(containingIntrinsicId == NI_SSE2_ConvertScalarToVector128Double); supportsSIMDScalarLoads = true; supportsGeneralLoads = supportsSIMDScalarLoads; break; } // The integral overloads only take GPR/mem assert(supportsSIMDScalarLoads == false); const unsigned expectedSize = genTypeSize(genActualType(containingNode->GetSimdBaseType())); const unsigned operandSize = genTypeSize(node->TypeGet()); supportsGeneralLoads = (operandSize >= expectedSize); break; } default: { supportsSIMDScalarLoads = true; supportsGeneralLoads = supportsSIMDScalarLoads; break; } } break; } case HW_Category_Scalar: { // We should only get here for integral nodes. assert(varTypeIsIntegral(node->TypeGet())); assert(supportsAlignedSIMDLoads == false); assert(supportsUnalignedSIMDLoads == false); assert(supportsSIMDScalarLoads == false); unsigned expectedSize = genTypeSize(containingNode->TypeGet()); const unsigned operandSize = genTypeSize(node->TypeGet()); // CRC32 codegen depends on its second oprand's type. // Currently, we are using SIMDBaseType to store the op2Type info. if (containingIntrinsicId == NI_SSE42_Crc32) { var_types op2Type = containingNode->GetSimdBaseType(); expectedSize = genTypeSize(op2Type); } supportsGeneralLoads = (operandSize >= expectedSize); break; } default: { assert(supportsAlignedSIMDLoads == false); assert(supportsGeneralLoads == false); assert(supportsSIMDScalarLoads == false); assert(supportsUnalignedSIMDLoads == false); break; } } *supportsRegOptional = supportsGeneralLoads; if (!node->OperIsHWIntrinsic()) { bool canBeContained = false; if (supportsGeneralLoads) { if (IsContainableMemoryOp(node)) { // Code motion safety checks // if (transparentParentNode != nullptr) { canBeContained = IsSafeToContainMem(containingNode, transparentParentNode, node); } else { canBeContained = IsSafeToContainMem(containingNode, node); } } else if (node->IsCnsNonZeroFltOrDbl()) { // Always safe. // canBeContained = true; } } return canBeContained; } // TODO-XArch: Update this to be table driven, if possible. GenTreeHWIntrinsic* hwintrinsic = node->AsHWIntrinsic(); NamedIntrinsic intrinsicId = hwintrinsic->GetHWIntrinsicId(); switch (intrinsicId) { case NI_Vector128_CreateScalarUnsafe: case NI_Vector256_CreateScalarUnsafe: { if (!supportsSIMDScalarLoads) { return false; } GenTree* op1 = hwintrinsic->Op(1); bool op1SupportsRegOptional = false; if (!TryGetContainableHWIntrinsicOp(containingNode, &op1, &op1SupportsRegOptional, hwintrinsic)) { return false; } LIR::Use use; if (!BlockRange().TryGetUse(node, &use) || (use.User() != containingNode)) { return false; } // We have CreateScalarUnsafe where the underlying scalar is directly containable // by containingNode. As such, we'll just remove CreateScalarUnsafe and consume // the value directly. 
use.ReplaceWith(op1); BlockRange().Remove(node); node = op1; node->ClearContained(); return true; } case NI_SSE_LoadAlignedVector128: case NI_SSE2_LoadAlignedVector128: case NI_AVX_LoadAlignedVector256: { return supportsAlignedSIMDLoads; } case NI_SSE_LoadScalarVector128: case NI_SSE2_LoadScalarVector128: { return supportsSIMDScalarLoads; } case NI_SSE_LoadVector128: case NI_SSE2_LoadVector128: case NI_AVX_LoadVector256: { return supportsUnalignedSIMDLoads; } case NI_AVX_ExtractVector128: case NI_AVX2_ExtractVector128: { return false; } default: { assert(!node->isContainableHWIntrinsic()); return false; } } } //---------------------------------------------------------------------------------------------- // ContainCheckHWIntrinsicAddr: Perform containment analysis for an address operand of a hardware // intrinsic node. // // Arguments: // node - The hardware intrinsic node // addr - The address node to try contain // void Lowering::ContainCheckHWIntrinsicAddr(GenTreeHWIntrinsic* node, GenTree* addr) { assert((addr->TypeGet() == TYP_I_IMPL) || (addr->TypeGet() == TYP_BYREF)); TryCreateAddrMode(addr, true, node); if ((addr->OperIs(GT_CLS_VAR_ADDR, GT_LCL_VAR_ADDR, GT_LCL_FLD_ADDR, GT_LEA) || (addr->IsCnsIntOrI() && addr->AsIntConCommon()->FitsInAddrBase(comp))) && IsSafeToContainMem(node, addr)) { MakeSrcContained(node, addr); } } //---------------------------------------------------------------------------------------------- // ContainCheckHWIntrinsic: Perform containment analysis for a hardware intrinsic node. // // Arguments: // node - The hardware intrinsic node. // void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node) { NamedIntrinsic intrinsicId = node->GetHWIntrinsicId(); HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId); size_t numArgs = node->GetOperandCount(); CorInfoType simdBaseJitType = node->GetSimdBaseJitType(); var_types simdBaseType = node->GetSimdBaseType(); if (!HWIntrinsicInfo::SupportsContainment(intrinsicId)) { // AVX2 gather are not containable and always have constant IMM argument if (HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsicId)) { GenTree* lastOp = node->Op(numArgs); MakeSrcContained(node, lastOp); } // Exit early if containment isn't supported return; } if (HWIntrinsicInfo::lookupCategory(intrinsicId) == HW_Category_IMM) { GenTree* lastOp = node->Op(numArgs); if (HWIntrinsicInfo::isImmOp(intrinsicId, lastOp) && lastOp->IsCnsIntOrI()) { MakeSrcContained(node, lastOp); } } if ((node->GetSimdSize() == 8) || (node->GetSimdSize() == 12)) { // We want to handle GetElement still for Vector2/3 if ((intrinsicId != NI_Vector128_GetElement) && (intrinsicId != NI_Vector256_GetElement)) { // TODO-XArch-CQ: Ideally we would key this off of the size containingNode // expects vs the size node actually is or would be if spilled to the stack return; } } // TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained const bool isCommutative = HWIntrinsicInfo::IsCommutative(intrinsicId); GenTree* op1 = nullptr; GenTree* op2 = nullptr; GenTree* op3 = nullptr; if (numArgs == 1) { // One argument intrinsics cannot be commutative assert(!isCommutative); op1 = node->Op(1); switch (category) { case HW_Category_MemoryLoad: ContainCheckHWIntrinsicAddr(node, op1); break; case HW_Category_SimpleSIMD: case HW_Category_SIMDScalar: case HW_Category_Scalar: { switch (intrinsicId) { case NI_SSE_ReciprocalScalar: case NI_SSE_ReciprocalSqrtScalar: case NI_SSE_SqrtScalar: case NI_SSE2_SqrtScalar: case NI_SSE41_CeilingScalar: case 
NI_SSE41_FloorScalar: case NI_SSE41_RoundCurrentDirectionScalar: case NI_SSE41_RoundToNearestIntegerScalar: case NI_SSE41_RoundToNegativeInfinityScalar: case NI_SSE41_RoundToPositiveInfinityScalar: case NI_SSE41_RoundToZeroScalar: { // These intrinsics have both 1 and 2-operand overloads. // // The 1-operand overload basically does `intrinsic(op1, op1)` // // Because of this, the operand must be loaded into a register // and cannot be contained. return; } case NI_SSE2_ConvertToInt32: case NI_SSE2_X64_ConvertToInt64: case NI_SSE2_ConvertToUInt32: case NI_SSE2_X64_ConvertToUInt64: case NI_AVX2_ConvertToInt32: case NI_AVX2_ConvertToUInt32: { if (varTypeIsIntegral(simdBaseType)) { // TODO-XARCH-CQ: These intrinsics are "ins reg/mem, xmm" and don't // currently support containment. return; } break; } case NI_SSE41_ConvertToVector128Int16: case NI_SSE41_ConvertToVector128Int32: case NI_SSE41_ConvertToVector128Int64: case NI_AVX2_ConvertToVector256Int16: case NI_AVX2_ConvertToVector256Int32: case NI_AVX2_ConvertToVector256Int64: if (!varTypeIsSIMD(op1)) { ContainCheckHWIntrinsicAddr(node, op1); return; } break; default: { break; } } bool supportsRegOptional = false; if (node->OperIsMemoryLoad()) { // We have a few cases that can be potential memory loads assert((intrinsicId == NI_SSE41_ConvertToVector128Int16) || (intrinsicId == NI_SSE41_ConvertToVector128Int32) || (intrinsicId == NI_SSE41_ConvertToVector128Int64) || (intrinsicId == NI_AVX2_BroadcastScalarToVector128) || (intrinsicId == NI_AVX2_BroadcastScalarToVector256) || (intrinsicId == NI_AVX2_ConvertToVector256Int16) || (intrinsicId == NI_AVX2_ConvertToVector256Int32) || (intrinsicId == NI_AVX2_ConvertToVector256Int64)); ContainCheckHWIntrinsicAddr(node, op1); } else if (TryGetContainableHWIntrinsicOp(node, &op1, &supportsRegOptional)) { MakeSrcContained(node, op1); } else if (supportsRegOptional) { op1->SetRegOptional(); } break; } default: { unreached(); break; } } } else { if (numArgs == 2) { op1 = node->Op(1); op2 = node->Op(2); switch (category) { case HW_Category_MemoryLoad: if ((intrinsicId == NI_AVX_MaskLoad) || (intrinsicId == NI_AVX2_MaskLoad)) { ContainCheckHWIntrinsicAddr(node, op1); } else { ContainCheckHWIntrinsicAddr(node, op2); } break; case HW_Category_MemoryStore: ContainCheckHWIntrinsicAddr(node, op1); if (((intrinsicId == NI_SSE_Store) || (intrinsicId == NI_SSE2_Store)) && op2->OperIsHWIntrinsic() && ((op2->AsHWIntrinsic()->GetHWIntrinsicId() == NI_AVX_ExtractVector128) || (op2->AsHWIntrinsic()->GetHWIntrinsicId() == NI_AVX2_ExtractVector128)) && op2->gtGetOp2()->IsIntegralConst()) { MakeSrcContained(node, op2); } break; case HW_Category_SimpleSIMD: case HW_Category_SIMDScalar: case HW_Category_Scalar: { bool supportsRegOptional = false; if (TryGetContainableHWIntrinsicOp(node, &op2, &supportsRegOptional)) { MakeSrcContained(node, op2); } else if ((isCommutative || (intrinsicId == NI_BMI2_MultiplyNoFlags) || (intrinsicId == NI_BMI2_X64_MultiplyNoFlags)) && TryGetContainableHWIntrinsicOp(node, &op1, &supportsRegOptional)) { MakeSrcContained(node, op1); // Swap the operands here to make the containment checks in codegen significantly simpler node->Op(1) = op2; node->Op(2) = op1; } else if (supportsRegOptional) { op2->SetRegOptional(); // TODO-XArch-CQ: For commutative nodes, either operand can be reg-optional. 
// https://github.com/dotnet/runtime/issues/6358 } break; } case HW_Category_IMM: { // We don't currently have any IMM intrinsics which are also commutative assert(!isCommutative); bool supportsRegOptional = false; switch (intrinsicId) { case NI_SSE2_Extract: case NI_AVX_ExtractVector128: case NI_AVX2_ExtractVector128: { // TODO-XARCH-CQ: These intrinsics are "ins reg/mem, xmm, imm8" and don't // currently support containment. break; } case NI_SSE2_ShiftLeftLogical: case NI_SSE2_ShiftRightArithmetic: case NI_SSE2_ShiftRightLogical: case NI_AVX2_ShiftLeftLogical: case NI_AVX2_ShiftRightArithmetic: case NI_AVX2_ShiftRightLogical: { // These intrinsics can have op2 be imm or reg/mem if (!HWIntrinsicInfo::isImmOp(intrinsicId, op2)) { if (TryGetContainableHWIntrinsicOp(node, &op2, &supportsRegOptional)) { MakeSrcContained(node, op2); } else if (supportsRegOptional) { op2->SetRegOptional(); } } break; } case NI_SSE2_Shuffle: case NI_SSE2_ShuffleHigh: case NI_SSE2_ShuffleLow: case NI_AVX2_Permute4x64: case NI_AVX2_Shuffle: case NI_AVX2_ShuffleHigh: case NI_AVX2_ShuffleLow: { // These intrinsics have op2 as an imm and op1 as a reg/mem if (TryGetContainableHWIntrinsicOp(node, &op1, &supportsRegOptional)) { MakeSrcContained(node, op1); } else if (supportsRegOptional) { op1->SetRegOptional(); } break; } case NI_SSE41_Extract: case NI_SSE41_X64_Extract: { assert(!varTypeIsFloating(simdBaseType)); // TODO-XARCH-CQ: These intrinsics are "ins reg/mem, xmm, imm8" and don't // currently support containment. break; } case NI_AVX_Permute: { // These intrinsics can have op2 be imm or reg/mem // They also can have op1 be reg/mem and op2 be imm if (HWIntrinsicInfo::isImmOp(intrinsicId, op2)) { if (TryGetContainableHWIntrinsicOp(node, &op1, &supportsRegOptional)) { MakeSrcContained(node, op1); } else if (supportsRegOptional) { op1->SetRegOptional(); } } else if (TryGetContainableHWIntrinsicOp(node, &op2, &supportsRegOptional)) { MakeSrcContained(node, op2); } else if (supportsRegOptional) { op2->SetRegOptional(); } break; } case NI_AES_KeygenAssist: { if (TryGetContainableHWIntrinsicOp(node, &op1, &supportsRegOptional)) { MakeSrcContained(node, op1); } else if (supportsRegOptional) { op1->SetRegOptional(); } break; } case NI_SSE2_ShiftLeftLogical128BitLane: case NI_SSE2_ShiftRightLogical128BitLane: case NI_AVX2_ShiftLeftLogical128BitLane: case NI_AVX2_ShiftRightLogical128BitLane: { #if DEBUG // These intrinsics should have been marked contained by the general-purpose handling // earlier in the method. 
GenTree* lastOp = node->Op(numArgs); if (HWIntrinsicInfo::isImmOp(intrinsicId, lastOp) && lastOp->IsCnsIntOrI()) { assert(lastOp->isContained()); } #endif break; } default: { assert(!"Unhandled containment for binary hardware intrinsic with immediate operand"); break; } } break; } case HW_Category_Helper: { // We don't currently have any IMM intrinsics which are also commutative assert(!isCommutative); switch (intrinsicId) { case NI_Vector128_GetElement: case NI_Vector256_GetElement: { if (op1->OperIs(GT_IND)) { assert((op1->gtFlags & GTF_IND_REQ_ADDR_IN_REG) != 0); op1->AsIndir()->Addr()->ClearContained(); } if (op2->OperIsConst()) { MakeSrcContained(node, op2); } if (IsContainableMemoryOp(op1) && IsSafeToContainMem(node, op1)) { MakeSrcContained(node, op1); if (op1->OperIs(GT_IND)) { op1->AsIndir()->Addr()->ClearContained(); } } break; } default: { assert(!"Unhandled containment for helper binary hardware intrinsic"); break; } } break; } default: { unreached(); break; } } } else if (numArgs == 3) { // three argument intrinsics should not be marked commutative assert(!isCommutative); op1 = node->Op(1); op2 = node->Op(2); op3 = node->Op(3); switch (category) { case HW_Category_MemoryStore: ContainCheckHWIntrinsicAddr(node, op1); break; case HW_Category_SimpleSIMD: case HW_Category_SIMDScalar: case HW_Category_Scalar: { if ((intrinsicId >= NI_FMA_MultiplyAdd) && (intrinsicId <= NI_FMA_MultiplySubtractNegatedScalar)) { bool supportsOp1RegOptional = false; bool supportsOp2RegOptional = false; bool supportsOp3RegOptional = false; unsigned resultOpNum = 0; LIR::Use use; GenTree* user = nullptr; if (BlockRange().TryGetUse(node, &use)) { user = use.User(); } resultOpNum = node->GetResultOpNumForFMA(user, op1, op2, op3); // Prioritize Containable op. Check if any one of the op is containable first. // Set op regOptional only if none of them is containable. 
// Prefer to make op3 contained, if (resultOpNum != 3 && TryGetContainableHWIntrinsicOp(node, &op3, &supportsOp3RegOptional)) { // result = (op1 * op2) + [op3] MakeSrcContained(node, op3); } else if (resultOpNum != 2 && TryGetContainableHWIntrinsicOp(node, &op2, &supportsOp2RegOptional)) { // result = (op1 * [op2]) + op3 MakeSrcContained(node, op2); } else if (resultOpNum != 1 && !HWIntrinsicInfo::CopiesUpperBits(intrinsicId) && TryGetContainableHWIntrinsicOp(node, &op1, &supportsOp1RegOptional)) { // result = ([op1] * op2) + op3 MakeSrcContained(node, op1); } else if (supportsOp3RegOptional) { assert(resultOpNum != 3); op3->SetRegOptional(); } else if (supportsOp2RegOptional) { assert(resultOpNum != 2); op2->SetRegOptional(); } else if (supportsOp1RegOptional) { op1->SetRegOptional(); } } else { bool supportsRegOptional = false; switch (intrinsicId) { case NI_SSE41_BlendVariable: case NI_AVX_BlendVariable: case NI_AVX2_BlendVariable: { if (TryGetContainableHWIntrinsicOp(node, &op2, &supportsRegOptional)) { MakeSrcContained(node, op2); } else if (supportsRegOptional) { op2->SetRegOptional(); } break; } case NI_AVXVNNI_MultiplyWideningAndAdd: case NI_AVXVNNI_MultiplyWideningAndAddSaturate: { if (TryGetContainableHWIntrinsicOp(node, &op3, &supportsRegOptional)) { MakeSrcContained(node, op3); } else if (supportsRegOptional) { op3->SetRegOptional(); } break; } case NI_BMI2_MultiplyNoFlags: case NI_BMI2_X64_MultiplyNoFlags: { if (TryGetContainableHWIntrinsicOp(node, &op2, &supportsRegOptional)) { MakeSrcContained(node, op2); } else if (TryGetContainableHWIntrinsicOp(node, &op1, &supportsRegOptional)) { MakeSrcContained(node, op1); // MultiplyNoFlags is a Commutative operation, so swap the first two operands here // to make the containment checks in codegen significantly simpler node->Op(1) = op2; node->Op(2) = op1; } else if (supportsRegOptional) { op2->SetRegOptional(); } break; } default: { unreached(); break; } } } break; } case HW_Category_IMM: { bool supportsRegOptional = false; switch (intrinsicId) { case NI_SSE_Shuffle: case NI_SSE2_Insert: case NI_SSE2_Shuffle: case NI_SSSE3_AlignRight: case NI_SSE41_Blend: case NI_SSE41_DotProduct: case NI_SSE41_Insert: case NI_SSE41_X64_Insert: case NI_SSE41_MultipleSumAbsoluteDifferences: case NI_AVX_Blend: case NI_AVX_Compare: case NI_AVX_CompareScalar: case NI_AVX_DotProduct: case NI_AVX_InsertVector128: case NI_AVX_Permute2x128: case NI_AVX_Shuffle: case NI_AVX2_AlignRight: case NI_AVX2_Blend: case NI_AVX2_InsertVector128: case NI_AVX2_MultipleSumAbsoluteDifferences: case NI_AVX2_Permute2x128: case NI_PCLMULQDQ_CarrylessMultiply: { if (TryGetContainableHWIntrinsicOp(node, &op2, &supportsRegOptional)) { MakeSrcContained(node, op2); } else if (supportsRegOptional) { op2->SetRegOptional(); } break; } default: { assert(!"Unhandled containment for ternary hardware intrinsic with immediate operand"); break; } } break; } default: { unreached(); break; } } } else { unreached(); } } } #endif // FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // ContainCheckFloatBinary: determine whether the sources of a floating point binary node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckFloatBinary(GenTreeOp* node) { assert(node->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_DIV) && varTypeIsFloating(node)); // overflow operations aren't supported on float/double types. 
assert(!node->gtOverflowEx()); GenTree* op1 = node->gtGetOp1(); GenTree* op2 = node->gtGetOp2(); // No implicit conversions at this stage as the expectation is that // everything is made explicit by adding casts. assert(op1->TypeGet() == op2->TypeGet()); bool isSafeToContainOp1 = true; bool isSafeToContainOp2 = true; if (op2->IsCnsNonZeroFltOrDbl()) { MakeSrcContained(node, op2); } else if (IsContainableMemoryOp(op2)) { isSafeToContainOp2 = IsSafeToContainMem(node, op2); if (isSafeToContainOp2) { MakeSrcContained(node, op2); } } if (!op2->isContained() && node->OperIsCommutative()) { // Though we have GT_ADD(op1=memOp, op2=non-memOp, we try to reorder the operands // as long as it is safe so that the following efficient code sequence is generated: // addss/sd targetReg, memOp (if op1Reg == targetReg) OR // movaps targetReg, op2Reg; addss/sd targetReg, [memOp] // // Instead of // movss op1Reg, [memOp]; addss/sd targetReg, Op2Reg (if op1Reg == targetReg) OR // movss op1Reg, [memOp]; movaps targetReg, op1Reg, addss/sd targetReg, Op2Reg if (op1->IsCnsNonZeroFltOrDbl()) { MakeSrcContained(node, op1); } else if (IsContainableMemoryOp(op1)) { isSafeToContainOp1 = IsSafeToContainMem(node, op1); if (isSafeToContainOp1) { MakeSrcContained(node, op1); } } } if (!op1->isContained() && !op2->isContained()) { // If there are no containable operands, we can make an operand reg optional. // IsSafeToContainMem is expensive so we call it at most once for each operand // in this method. If we already called IsSafeToContainMem, it must have returned false; // otherwise, the corresponding operand (op1 or op2) would be contained. isSafeToContainOp1 = isSafeToContainOp1 && IsSafeToContainMem(node, op1); isSafeToContainOp2 = isSafeToContainOp2 && IsSafeToContainMem(node, op2); SetRegOptionalForBinOp(node, isSafeToContainOp1, isSafeToContainOp2); } } #endif // TARGET_XARCH
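The containment analysis above decides when an operand can be encoded directly as a memory operand (or marked reg-optional) instead of being loaded into a register, and `LowerRMWMemOp` additionally folds whole read-modify-write stores. As an illustrative sketch only — not part of the file above, and the commented assembly is the typical expected shape rather than verified output — the kinds of source patterns these paths target look like this:

```cpp
#include <cstdint>

// Read-modify-write on a memory location. The RMW path rooted at the
// STOREIND (see IsRMWMemOpRootedAtStoreInd above) aims to emit a single
// "add dword ptr [rcx], edx" instead of separate load/add/store.
void AddInPlace(int32_t* p, int32_t v)
{
    *p += v;
}

// Plain containment of a memory operand: when it is safe, the load can be
// folded into the add as "add eax, dword ptr [rcx]"; otherwise the operand
// may still be marked reg-optional so the register allocator can decide.
int32_t AddFromMemory(int32_t* p, int32_t v)
{
    return v + *p;
}
```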
1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
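As a minimal sketch of the pattern being recognized (not part of the PR; the `_blsi_u32` mention assumes a compiler exposing the BMI1 intrinsic header), the `AND(x, NEG(x))` shape that now maps to `ExtractLowestSetBit`/`blsi` corresponds to the familiar lowest-set-bit idiom:

```cpp
#include <cstdint>
// #include <immintrin.h>   // _blsi_u32 is the direct BMI1 intrinsic form

// Isolate the lowest set bit of x. In IR this is AND(x, NEG(x)), which the
// new lowering rewrites to the ExtractLowestSetBit hardware intrinsic and
// which encodes as a single "blsi" instruction on BMI1-capable hardware.
uint32_t LowestSetBit(uint32_t x)
{
    return x & (0u - x);       // equivalent to x & -x
    // roughly the same as: return _blsi_u32(x);
}
```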
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/pal/src/libunwind/src/unwind/unwind-internal.h
/* libunwind - a platform-independent unwind library Copyright (C) 2003, 2005 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef unwind_internal_h #define unwind_internal_h #define UNW_LOCAL_ONLY #include <unwind.h> #include <stdlib.h> #include <libunwind.h> #include "libunwind_i.h" /* The version of the _Unwind_*() interface implemented by this code. */ #define _U_VERSION 1 typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn) (int, _Unwind_Action, uint64_t, struct _Unwind_Exception *, struct _Unwind_Context *); struct _Unwind_Context { unw_cursor_t cursor; int end_of_stack; /* set to 1 if the end of stack was reached */ }; /* This must be a macro because unw_getcontext() must be invoked from the callee, even if optimization (and hence inlining) is turned off. The macro arguments MUST NOT have any side-effects. */ #define _Unwind_InitContext(context, uc) \ ((context)->end_of_stack = 0, \ ((unw_getcontext (uc) < 0 || unw_init_local (&(context)->cursor, uc) < 0) \ ? -1 : 0)) static _Unwind_Reason_Code ALWAYS_INLINE _Unwind_Phase2 (struct _Unwind_Exception *exception_object, struct _Unwind_Context *context) { _Unwind_Stop_Fn stop = (_Unwind_Stop_Fn) exception_object->private_1; uint64_t exception_class = exception_object->exception_class; void *stop_parameter = (void *) exception_object->private_2; _Unwind_Personality_Fn personality; _Unwind_Reason_Code reason; _Unwind_Action actions; unw_proc_info_t pi; unw_word_t ip; int ret; actions = _UA_CLEANUP_PHASE; if (stop) actions |= _UA_FORCE_UNWIND; while (1) { ret = unw_step (&context->cursor); if (ret <= 0) { if (ret == 0) { actions |= _UA_END_OF_STACK; context->end_of_stack = 1; } else return _URC_FATAL_PHASE2_ERROR; } if (stop) { reason = (*stop) (_U_VERSION, actions, exception_class, exception_object, context, stop_parameter); if (reason != _URC_NO_REASON) /* Stop function may return _URC_FATAL_PHASE2_ERROR if it's unable to handle end-of-stack condition or _URC_FATAL_PHASE2_ERROR if something is wrong. Not that it matters: the resulting state is indeterminate anyhow so we must return _URC_FATAL_PHASE2_ERROR... 
*/ return _URC_FATAL_PHASE2_ERROR; } if (context->end_of_stack || unw_get_proc_info (&context->cursor, &pi) < 0) return _URC_FATAL_PHASE2_ERROR; personality = (_Unwind_Personality_Fn) (uintptr_t) pi.handler; if (personality) { if (!stop) { if (unw_get_reg (&context->cursor, UNW_REG_IP, &ip) < 0) return _URC_FATAL_PHASE2_ERROR; if ((unsigned long) stop_parameter == ip) actions |= _UA_HANDLER_FRAME; } reason = (*personality) (_U_VERSION, actions, exception_class, exception_object, context); if (reason != _URC_CONTINUE_UNWIND) { if (reason == _URC_INSTALL_CONTEXT) { /* we may regain control via _Unwind_Resume() */ unw_resume (&context->cursor); abort (); } else return _URC_FATAL_PHASE2_ERROR; } if (actions & _UA_HANDLER_FRAME) /* The personality routine for the handler-frame changed it's mind; that's a no-no... */ abort (); } } return _URC_FATAL_PHASE2_ERROR; /* shouldn't be reached */ } #endif /* unwind_internal_h */
/* libunwind - a platform-independent unwind library Copyright (C) 2003, 2005 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef unwind_internal_h #define unwind_internal_h #define UNW_LOCAL_ONLY #include <unwind.h> #include <stdlib.h> #include <libunwind.h> #include "libunwind_i.h" /* The version of the _Unwind_*() interface implemented by this code. */ #define _U_VERSION 1 typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn) (int, _Unwind_Action, uint64_t, struct _Unwind_Exception *, struct _Unwind_Context *); struct _Unwind_Context { unw_cursor_t cursor; int end_of_stack; /* set to 1 if the end of stack was reached */ }; /* This must be a macro because unw_getcontext() must be invoked from the callee, even if optimization (and hence inlining) is turned off. The macro arguments MUST NOT have any side-effects. */ #define _Unwind_InitContext(context, uc) \ ((context)->end_of_stack = 0, \ ((unw_getcontext (uc) < 0 || unw_init_local (&(context)->cursor, uc) < 0) \ ? -1 : 0)) static _Unwind_Reason_Code ALWAYS_INLINE _Unwind_Phase2 (struct _Unwind_Exception *exception_object, struct _Unwind_Context *context) { _Unwind_Stop_Fn stop = (_Unwind_Stop_Fn) exception_object->private_1; uint64_t exception_class = exception_object->exception_class; void *stop_parameter = (void *) exception_object->private_2; _Unwind_Personality_Fn personality; _Unwind_Reason_Code reason; _Unwind_Action actions; unw_proc_info_t pi; unw_word_t ip; int ret; actions = _UA_CLEANUP_PHASE; if (stop) actions |= _UA_FORCE_UNWIND; while (1) { ret = unw_step (&context->cursor); if (ret <= 0) { if (ret == 0) { actions |= _UA_END_OF_STACK; context->end_of_stack = 1; } else return _URC_FATAL_PHASE2_ERROR; } if (stop) { reason = (*stop) (_U_VERSION, actions, exception_class, exception_object, context, stop_parameter); if (reason != _URC_NO_REASON) /* Stop function may return _URC_FATAL_PHASE2_ERROR if it's unable to handle end-of-stack condition or _URC_FATAL_PHASE2_ERROR if something is wrong. Not that it matters: the resulting state is indeterminate anyhow so we must return _URC_FATAL_PHASE2_ERROR... 
*/ return _URC_FATAL_PHASE2_ERROR; } if (context->end_of_stack || unw_get_proc_info (&context->cursor, &pi) < 0) return _URC_FATAL_PHASE2_ERROR; personality = (_Unwind_Personality_Fn) (uintptr_t) pi.handler; if (personality) { if (!stop) { if (unw_get_reg (&context->cursor, UNW_REG_IP, &ip) < 0) return _URC_FATAL_PHASE2_ERROR; if ((unsigned long) stop_parameter == ip) actions |= _UA_HANDLER_FRAME; } reason = (*personality) (_U_VERSION, actions, exception_class, exception_object, context); if (reason != _URC_CONTINUE_UNWIND) { if (reason == _URC_INSTALL_CONTEXT) { /* we may regain control via _Unwind_Resume() */ unw_resume (&context->cursor); abort (); } else return _URC_FATAL_PHASE2_ERROR; } if (actions & _UA_HANDLER_FRAME) /* The personality routine for the handler-frame changed it's mind; that's a no-no... */ abort (); } } return _URC_FATAL_PHASE2_ERROR; /* shouldn't be reached */ } #endif /* unwind_internal_h */
-1
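A minimal, hypothetical C# sketch to go with the PR description above — it is not taken from the PR or from dotnet/runtime. It only shows the source-level shape of the `AND(x, NEG(x))` pattern the new lowering recognizes (the lowering itself runs on the JIT's IR, not on C#), alongside the explicit `Bmi1.ExtractLowestSetBit` intrinsic for comparison; the method names are illustrative.

```csharp
// Sketch only: the and/negate idiom that isolates the lowest set bit.
// With the PR's lowering, on BMI1-capable hardware the JIT may emit a single
// `blsi` for this pattern instead of the mov/neg/and sequence shown in the diff.
using System;
using System.Runtime.Intrinsics.X86;

class BlsiPatternDemo
{
    // The and/negate pattern targeted by the lowering (name is illustrative).
    static uint LowestSetBit(uint x) => x & (0u - x);

    // The explicit intrinsic spelling of the same operation, for comparison.
    static uint LowestSetBitIntrinsic(uint x) =>
        Bmi1.IsSupported ? Bmi1.ExtractLowestSetBit(x) : x & (0u - x);

    static void Main()
    {
        uint value = 0b_1011_0100;                       // 180
        Console.WriteLine(LowestSetBit(value));          // prints 4
        Console.WriteLine(LowestSetBitIntrinsic(value)); // prints 4
    }
}
```

Both methods return the same value; the point of the lowering is that the plain `x & (0u - x)` spelling no longer needs to be rewritten by hand to the intrinsic form to get `blsi`.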
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/unwinder/stdafx.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // File: stdafx.h // // Prevent the inclusion of Random.h from disabling rand(). rand() is used by some other headers we include // and there's no reason why DAC should be forbidden from using it. #define DO_NOT_DISABLE_RAND #define USE_COM_CONTEXT_DEF #include <common.h> #include <debugger.h> #include <methoditer.h> #ifdef DACCESS_COMPILE #include <dacprivate.h> #include <dacimpl.h> #endif // DACCESS_COMPILE
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // File: stdafx.h // // Prevent the inclusion of Random.h from disabling rand(). rand() is used by some other headers we include // and there's no reason why DAC should be forbidden from using it. #define DO_NOT_DISABLE_RAND #define USE_COM_CONTEXT_DEF #include <common.h> #include <debugger.h> #include <methoditer.h> #ifdef DACCESS_COMPILE #include <dacprivate.h> #include <dacimpl.h> #endif // DACCESS_COMPILE
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/pal/tests/palsuite/threading/SignalObjectAndWait/SignalObjectAndWaitTest.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <palsuite.h> enum class SignalableObjectType { First = 0, Invalid = First, ManualResetEvent, AutoResetEvent, Semaphore, FullSemaphore, Mutex, UnlockedMutex, Last = UnlockedMutex }; enum class WaitableObjectType { First = 0, Invalid = First, ManualResetEvent, UnsignaledManualResetEvent, AutoResetEvent, UnsignaledAutoResetEvent, Semaphore, EmptySemaphore, Mutex, LockedMutex, Last = LockedMutex }; void operator ++(SignalableObjectType &objectType) { ++(int &)objectType; } void operator ++(WaitableObjectType &objectType) { ++(int &)objectType; } struct AssertionFailureException { const int lineNumber; const char *const expression; SignalableObjectType signalableObjectType; WaitableObjectType waitableObjectType; DWORD waitResult; DWORD errorCode; AssertionFailureException(int lineNumber, const char *expression) : lineNumber(lineNumber), expression(expression), signalableObjectType(SignalableObjectType::Invalid), waitableObjectType(WaitableObjectType::Invalid), waitResult(WAIT_OBJECT_0), errorCode(ERROR_SUCCESS) { } }; #define TestAssert(expression) \ do \ { \ if (!(expression)) \ { \ throw AssertionFailureException(__LINE__, "" #expression ""); \ } \ } while (false) HANDLE CreateObjectToSignal(SignalableObjectType objectType) { switch (objectType) { case SignalableObjectType::Invalid: return nullptr; case SignalableObjectType::ManualResetEvent: return CreateEvent(nullptr, true, false, nullptr); case SignalableObjectType::AutoResetEvent: return CreateEvent(nullptr, false, false, nullptr); case SignalableObjectType::Semaphore: return CreateSemaphoreExW(nullptr, 0, 1, nullptr, 0, 0); case SignalableObjectType::FullSemaphore: return CreateSemaphoreExW(nullptr, 1, 1, nullptr, 0, 0); case SignalableObjectType::Mutex: return CreateMutex(nullptr, true, nullptr); case SignalableObjectType::UnlockedMutex: return CreateMutex(nullptr, false, nullptr); default: TestAssert(false); } } void VerifySignal(HANDLE h, SignalableObjectType objectType) { switch (objectType) { case SignalableObjectType::ManualResetEvent: TestAssert(WaitForSingleObject(h, 0) == WAIT_OBJECT_0); break; case SignalableObjectType::AutoResetEvent: TestAssert(WaitForSingleObject(h, 0) == WAIT_OBJECT_0); SetEvent(h); break; case SignalableObjectType::Semaphore: TestAssert(!ReleaseSemaphore(h, 1, nullptr)); break; case SignalableObjectType::Mutex: TestAssert(!ReleaseMutex(h)); break; default: TestAssert(false); } } void CloseObjectToSignal(HANDLE h, SignalableObjectType objectType) { if (objectType != SignalableObjectType::Invalid) { CloseHandle(h); } } HANDLE CreateObjectToWaitOn(WaitableObjectType objectType) { switch (objectType) { case WaitableObjectType::Invalid: return nullptr; case WaitableObjectType::ManualResetEvent: return CreateEvent(nullptr, true, true, nullptr); case WaitableObjectType::UnsignaledManualResetEvent: return CreateEvent(nullptr, true, false, nullptr); case WaitableObjectType::AutoResetEvent: return CreateEvent(nullptr, false, true, nullptr); case WaitableObjectType::UnsignaledAutoResetEvent: return CreateEvent(nullptr, false, false, nullptr); case WaitableObjectType::Semaphore: return CreateSemaphoreExW(nullptr, 1, 1, nullptr, 0, 0); case WaitableObjectType::EmptySemaphore: return CreateSemaphoreExW(nullptr, 0, 1, nullptr, 0, 0); case WaitableObjectType::Mutex: return CreateMutex(nullptr, false, nullptr); case WaitableObjectType::LockedMutex: return CreateMutex(nullptr, 
true, nullptr); default: TestAssert(false); } } void VerifyWait(HANDLE h, WaitableObjectType objectType) { switch (objectType) { case WaitableObjectType::ManualResetEvent: case WaitableObjectType::UnsignaledManualResetEvent: break; case WaitableObjectType::AutoResetEvent: case WaitableObjectType::UnsignaledAutoResetEvent: case WaitableObjectType::Semaphore: case WaitableObjectType::EmptySemaphore: TestAssert(WaitForSingleObject(h, 0) == WAIT_TIMEOUT); break; case WaitableObjectType::Mutex: TestAssert(ReleaseMutex(h)); TestAssert(!ReleaseMutex(h)); TestAssert(WaitForSingleObject(h, 0) == WAIT_OBJECT_0); break; case WaitableObjectType::LockedMutex: TestAssert(ReleaseMutex(h)); TestAssert(ReleaseMutex(h)); TestAssert(!ReleaseMutex(h)); TestAssert(WaitForSingleObject(h, 0) == WAIT_OBJECT_0); TestAssert(WaitForSingleObject(h, 0) == WAIT_OBJECT_0); break; default: TestAssert(false); } } void CloseObjectToWaitOn(HANDLE h, WaitableObjectType objectType) { switch (objectType) { case WaitableObjectType::ManualResetEvent: case WaitableObjectType::UnsignaledManualResetEvent: case WaitableObjectType::AutoResetEvent: case WaitableObjectType::UnsignaledAutoResetEvent: CloseHandle(h); break; case WaitableObjectType::Semaphore: case WaitableObjectType::EmptySemaphore: ReleaseSemaphore(h, 1, nullptr); CloseHandle(h); break; case WaitableObjectType::Mutex: ReleaseMutex(h); CloseHandle(h); break; case WaitableObjectType::LockedMutex: ReleaseMutex(h); ReleaseMutex(h); CloseHandle(h); break; default: break; } } bool Verify(SignalableObjectType signalableObjectType, WaitableObjectType waitableObjectType, DWORD waitResult, DWORD errorCode) { if (signalableObjectType == SignalableObjectType::Invalid || waitableObjectType == WaitableObjectType::Invalid) { TestAssert(waitResult == WAIT_FAILED); TestAssert(errorCode == ERROR_INVALID_HANDLE); return false; } switch (signalableObjectType) { case SignalableObjectType::FullSemaphore: TestAssert(waitResult == WAIT_FAILED); TestAssert(errorCode == ERROR_TOO_MANY_POSTS); return false; case SignalableObjectType::UnlockedMutex: TestAssert(waitResult == WAIT_FAILED); TestAssert(errorCode == ERROR_NOT_OWNER); return false; default: break; } switch (waitableObjectType) { case WaitableObjectType::UnsignaledManualResetEvent: case WaitableObjectType::UnsignaledAutoResetEvent: case WaitableObjectType::EmptySemaphore: TestAssert(waitResult == WAIT_TIMEOUT); break; default: TestAssert(waitResult == WAIT_OBJECT_0); break; } TestAssert(errorCode == ERROR_SUCCESS); return true; } void Run(SignalableObjectType signalableObjectType, WaitableObjectType waitableObjectType) { HANDLE objectToSignal = CreateObjectToSignal(signalableObjectType); TestAssert(signalableObjectType == SignalableObjectType::Invalid || objectToSignal != nullptr); HANDLE objectToWaitOn = CreateObjectToWaitOn(waitableObjectType); TestAssert(waitableObjectType == WaitableObjectType::Invalid || objectToWaitOn != nullptr); DWORD waitResult = SignalObjectAndWait(objectToSignal, objectToWaitOn, 0, true); DWORD errorCode = waitResult == WAIT_FAILED ? 
GetLastError() : ERROR_SUCCESS; try { if (Verify(signalableObjectType, waitableObjectType, waitResult, errorCode)) { VerifySignal(objectToSignal, signalableObjectType); VerifyWait(objectToWaitOn, waitableObjectType); } } catch (AssertionFailureException ex) { ex.signalableObjectType = signalableObjectType; ex.waitableObjectType = waitableObjectType; ex.waitResult = waitResult; ex.errorCode = errorCode; throw ex; } } static bool s_apcCalled = false; void CALLBACK ApcCallback(ULONG_PTR dwParam) { s_apcCalled = true; HANDLE *objects = (HANDLE *)dwParam; HANDLE objectToSignal = objects[0]; HANDLE objectToWaitOn = objects[1]; TestAssert(WaitForSingleObject(objectToSignal, 0) == WAIT_OBJECT_0); // signal has occurred TestAssert(WaitForSingleObject(objectToWaitOn, 0) == WAIT_OBJECT_0); // wait has not occurred yet SetEvent(objectToWaitOn); } void Run() { for (SignalableObjectType signalableObjectType = SignalableObjectType::First; signalableObjectType <= SignalableObjectType::Last; ++signalableObjectType) { for (WaitableObjectType waitableObjectType = WaitableObjectType::First; waitableObjectType <= WaitableObjectType::Last; ++waitableObjectType) { Run(signalableObjectType, waitableObjectType); } } DWORD waitResult = WAIT_FAILED; try { HANDLE objectToSignal = CreateObjectToSignal(SignalableObjectType::ManualResetEvent); TestAssert(objectToSignal != nullptr); HANDLE objectToWaitOn = CreateObjectToWaitOn(WaitableObjectType::AutoResetEvent); TestAssert(objectToWaitOn != nullptr); HANDLE objects[] = {objectToSignal, objectToWaitOn}; // Verify that a queued APC is not called if the wait is not alertable QueueUserAPC(&ApcCallback, GetCurrentThread(), (ULONG_PTR)&objects); waitResult = SignalObjectAndWait(objectToSignal, objectToWaitOn, 0, false); TestAssert(waitResult == WAIT_OBJECT_0); TestAssert(!s_apcCalled); TestAssert(WaitForSingleObject(objectToSignal, 0) == WAIT_OBJECT_0); // signal has occurred TestAssert(WaitForSingleObject(objectToWaitOn, 0) == WAIT_TIMEOUT); // wait has occurred // Verify that signal, call APC, wait, occur in that order ResetEvent(objectToSignal); SetEvent(objectToWaitOn); waitResult = SignalObjectAndWait(objectToSignal, objectToWaitOn, 0, true); TestAssert(waitResult == WAIT_IO_COMPLETION); TestAssert(s_apcCalled); TestAssert(WaitForSingleObject(objectToSignal, 0) == WAIT_OBJECT_0); // signal has occurred TestAssert(WaitForSingleObject(objectToWaitOn, 0) == WAIT_OBJECT_0); // wait has not occurred yet s_apcCalled = false; ResetEvent(objectToSignal); SetEvent(objectToWaitOn); waitResult = SignalObjectAndWait(objectToSignal, objectToWaitOn, 0, true); TestAssert(waitResult == WAIT_OBJECT_0); TestAssert(!s_apcCalled); TestAssert(WaitForSingleObject(objectToSignal, 0) == WAIT_OBJECT_0); // signal has occurred TestAssert(WaitForSingleObject(objectToWaitOn, 0) == WAIT_TIMEOUT); // wait has occurred CloseHandle(objectToSignal); CloseHandle(objectToWaitOn); } catch (AssertionFailureException ex) { ex.signalableObjectType = SignalableObjectType::ManualResetEvent; ex.waitableObjectType = WaitableObjectType::AutoResetEvent; ex.waitResult = waitResult; throw ex; } } PALTEST(threading_SignalObjectAndWait_paltest_signalobjectandwaittest, "threading/SignalObjectAndWait/paltest_signalobjectandwaittest") { if (PAL_Initialize(argc, argv) != 0) { return FAIL; } int testReturnCode = PASS; try { Run(); } catch (AssertionFailureException ex) { printf( "SignalObjectAndWaitTest - Assertion failure (line %d, signalable object type %d, waitable object type %d, wait result 0x%x, error code %u): 
'%s'\n", ex.lineNumber, ex.signalableObjectType, ex.waitableObjectType, ex.waitResult, ex.errorCode, ex.expression); fflush(stdout); testReturnCode = FAIL; } PAL_TerminateEx(testReturnCode); return testReturnCode; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <palsuite.h> enum class SignalableObjectType { First = 0, Invalid = First, ManualResetEvent, AutoResetEvent, Semaphore, FullSemaphore, Mutex, UnlockedMutex, Last = UnlockedMutex }; enum class WaitableObjectType { First = 0, Invalid = First, ManualResetEvent, UnsignaledManualResetEvent, AutoResetEvent, UnsignaledAutoResetEvent, Semaphore, EmptySemaphore, Mutex, LockedMutex, Last = LockedMutex }; void operator ++(SignalableObjectType &objectType) { ++(int &)objectType; } void operator ++(WaitableObjectType &objectType) { ++(int &)objectType; } struct AssertionFailureException { const int lineNumber; const char *const expression; SignalableObjectType signalableObjectType; WaitableObjectType waitableObjectType; DWORD waitResult; DWORD errorCode; AssertionFailureException(int lineNumber, const char *expression) : lineNumber(lineNumber), expression(expression), signalableObjectType(SignalableObjectType::Invalid), waitableObjectType(WaitableObjectType::Invalid), waitResult(WAIT_OBJECT_0), errorCode(ERROR_SUCCESS) { } }; #define TestAssert(expression) \ do \ { \ if (!(expression)) \ { \ throw AssertionFailureException(__LINE__, "" #expression ""); \ } \ } while (false) HANDLE CreateObjectToSignal(SignalableObjectType objectType) { switch (objectType) { case SignalableObjectType::Invalid: return nullptr; case SignalableObjectType::ManualResetEvent: return CreateEvent(nullptr, true, false, nullptr); case SignalableObjectType::AutoResetEvent: return CreateEvent(nullptr, false, false, nullptr); case SignalableObjectType::Semaphore: return CreateSemaphoreExW(nullptr, 0, 1, nullptr, 0, 0); case SignalableObjectType::FullSemaphore: return CreateSemaphoreExW(nullptr, 1, 1, nullptr, 0, 0); case SignalableObjectType::Mutex: return CreateMutex(nullptr, true, nullptr); case SignalableObjectType::UnlockedMutex: return CreateMutex(nullptr, false, nullptr); default: TestAssert(false); } } void VerifySignal(HANDLE h, SignalableObjectType objectType) { switch (objectType) { case SignalableObjectType::ManualResetEvent: TestAssert(WaitForSingleObject(h, 0) == WAIT_OBJECT_0); break; case SignalableObjectType::AutoResetEvent: TestAssert(WaitForSingleObject(h, 0) == WAIT_OBJECT_0); SetEvent(h); break; case SignalableObjectType::Semaphore: TestAssert(!ReleaseSemaphore(h, 1, nullptr)); break; case SignalableObjectType::Mutex: TestAssert(!ReleaseMutex(h)); break; default: TestAssert(false); } } void CloseObjectToSignal(HANDLE h, SignalableObjectType objectType) { if (objectType != SignalableObjectType::Invalid) { CloseHandle(h); } } HANDLE CreateObjectToWaitOn(WaitableObjectType objectType) { switch (objectType) { case WaitableObjectType::Invalid: return nullptr; case WaitableObjectType::ManualResetEvent: return CreateEvent(nullptr, true, true, nullptr); case WaitableObjectType::UnsignaledManualResetEvent: return CreateEvent(nullptr, true, false, nullptr); case WaitableObjectType::AutoResetEvent: return CreateEvent(nullptr, false, true, nullptr); case WaitableObjectType::UnsignaledAutoResetEvent: return CreateEvent(nullptr, false, false, nullptr); case WaitableObjectType::Semaphore: return CreateSemaphoreExW(nullptr, 1, 1, nullptr, 0, 0); case WaitableObjectType::EmptySemaphore: return CreateSemaphoreExW(nullptr, 0, 1, nullptr, 0, 0); case WaitableObjectType::Mutex: return CreateMutex(nullptr, false, nullptr); case WaitableObjectType::LockedMutex: return CreateMutex(nullptr, 
true, nullptr); default: TestAssert(false); } } void VerifyWait(HANDLE h, WaitableObjectType objectType) { switch (objectType) { case WaitableObjectType::ManualResetEvent: case WaitableObjectType::UnsignaledManualResetEvent: break; case WaitableObjectType::AutoResetEvent: case WaitableObjectType::UnsignaledAutoResetEvent: case WaitableObjectType::Semaphore: case WaitableObjectType::EmptySemaphore: TestAssert(WaitForSingleObject(h, 0) == WAIT_TIMEOUT); break; case WaitableObjectType::Mutex: TestAssert(ReleaseMutex(h)); TestAssert(!ReleaseMutex(h)); TestAssert(WaitForSingleObject(h, 0) == WAIT_OBJECT_0); break; case WaitableObjectType::LockedMutex: TestAssert(ReleaseMutex(h)); TestAssert(ReleaseMutex(h)); TestAssert(!ReleaseMutex(h)); TestAssert(WaitForSingleObject(h, 0) == WAIT_OBJECT_0); TestAssert(WaitForSingleObject(h, 0) == WAIT_OBJECT_0); break; default: TestAssert(false); } } void CloseObjectToWaitOn(HANDLE h, WaitableObjectType objectType) { switch (objectType) { case WaitableObjectType::ManualResetEvent: case WaitableObjectType::UnsignaledManualResetEvent: case WaitableObjectType::AutoResetEvent: case WaitableObjectType::UnsignaledAutoResetEvent: CloseHandle(h); break; case WaitableObjectType::Semaphore: case WaitableObjectType::EmptySemaphore: ReleaseSemaphore(h, 1, nullptr); CloseHandle(h); break; case WaitableObjectType::Mutex: ReleaseMutex(h); CloseHandle(h); break; case WaitableObjectType::LockedMutex: ReleaseMutex(h); ReleaseMutex(h); CloseHandle(h); break; default: break; } } bool Verify(SignalableObjectType signalableObjectType, WaitableObjectType waitableObjectType, DWORD waitResult, DWORD errorCode) { if (signalableObjectType == SignalableObjectType::Invalid || waitableObjectType == WaitableObjectType::Invalid) { TestAssert(waitResult == WAIT_FAILED); TestAssert(errorCode == ERROR_INVALID_HANDLE); return false; } switch (signalableObjectType) { case SignalableObjectType::FullSemaphore: TestAssert(waitResult == WAIT_FAILED); TestAssert(errorCode == ERROR_TOO_MANY_POSTS); return false; case SignalableObjectType::UnlockedMutex: TestAssert(waitResult == WAIT_FAILED); TestAssert(errorCode == ERROR_NOT_OWNER); return false; default: break; } switch (waitableObjectType) { case WaitableObjectType::UnsignaledManualResetEvent: case WaitableObjectType::UnsignaledAutoResetEvent: case WaitableObjectType::EmptySemaphore: TestAssert(waitResult == WAIT_TIMEOUT); break; default: TestAssert(waitResult == WAIT_OBJECT_0); break; } TestAssert(errorCode == ERROR_SUCCESS); return true; } void Run(SignalableObjectType signalableObjectType, WaitableObjectType waitableObjectType) { HANDLE objectToSignal = CreateObjectToSignal(signalableObjectType); TestAssert(signalableObjectType == SignalableObjectType::Invalid || objectToSignal != nullptr); HANDLE objectToWaitOn = CreateObjectToWaitOn(waitableObjectType); TestAssert(waitableObjectType == WaitableObjectType::Invalid || objectToWaitOn != nullptr); DWORD waitResult = SignalObjectAndWait(objectToSignal, objectToWaitOn, 0, true); DWORD errorCode = waitResult == WAIT_FAILED ? 
GetLastError() : ERROR_SUCCESS; try { if (Verify(signalableObjectType, waitableObjectType, waitResult, errorCode)) { VerifySignal(objectToSignal, signalableObjectType); VerifyWait(objectToWaitOn, waitableObjectType); } } catch (AssertionFailureException ex) { ex.signalableObjectType = signalableObjectType; ex.waitableObjectType = waitableObjectType; ex.waitResult = waitResult; ex.errorCode = errorCode; throw ex; } } static bool s_apcCalled = false; void CALLBACK ApcCallback(ULONG_PTR dwParam) { s_apcCalled = true; HANDLE *objects = (HANDLE *)dwParam; HANDLE objectToSignal = objects[0]; HANDLE objectToWaitOn = objects[1]; TestAssert(WaitForSingleObject(objectToSignal, 0) == WAIT_OBJECT_0); // signal has occurred TestAssert(WaitForSingleObject(objectToWaitOn, 0) == WAIT_OBJECT_0); // wait has not occurred yet SetEvent(objectToWaitOn); } void Run() { for (SignalableObjectType signalableObjectType = SignalableObjectType::First; signalableObjectType <= SignalableObjectType::Last; ++signalableObjectType) { for (WaitableObjectType waitableObjectType = WaitableObjectType::First; waitableObjectType <= WaitableObjectType::Last; ++waitableObjectType) { Run(signalableObjectType, waitableObjectType); } } DWORD waitResult = WAIT_FAILED; try { HANDLE objectToSignal = CreateObjectToSignal(SignalableObjectType::ManualResetEvent); TestAssert(objectToSignal != nullptr); HANDLE objectToWaitOn = CreateObjectToWaitOn(WaitableObjectType::AutoResetEvent); TestAssert(objectToWaitOn != nullptr); HANDLE objects[] = {objectToSignal, objectToWaitOn}; // Verify that a queued APC is not called if the wait is not alertable QueueUserAPC(&ApcCallback, GetCurrentThread(), (ULONG_PTR)&objects); waitResult = SignalObjectAndWait(objectToSignal, objectToWaitOn, 0, false); TestAssert(waitResult == WAIT_OBJECT_0); TestAssert(!s_apcCalled); TestAssert(WaitForSingleObject(objectToSignal, 0) == WAIT_OBJECT_0); // signal has occurred TestAssert(WaitForSingleObject(objectToWaitOn, 0) == WAIT_TIMEOUT); // wait has occurred // Verify that signal, call APC, wait, occur in that order ResetEvent(objectToSignal); SetEvent(objectToWaitOn); waitResult = SignalObjectAndWait(objectToSignal, objectToWaitOn, 0, true); TestAssert(waitResult == WAIT_IO_COMPLETION); TestAssert(s_apcCalled); TestAssert(WaitForSingleObject(objectToSignal, 0) == WAIT_OBJECT_0); // signal has occurred TestAssert(WaitForSingleObject(objectToWaitOn, 0) == WAIT_OBJECT_0); // wait has not occurred yet s_apcCalled = false; ResetEvent(objectToSignal); SetEvent(objectToWaitOn); waitResult = SignalObjectAndWait(objectToSignal, objectToWaitOn, 0, true); TestAssert(waitResult == WAIT_OBJECT_0); TestAssert(!s_apcCalled); TestAssert(WaitForSingleObject(objectToSignal, 0) == WAIT_OBJECT_0); // signal has occurred TestAssert(WaitForSingleObject(objectToWaitOn, 0) == WAIT_TIMEOUT); // wait has occurred CloseHandle(objectToSignal); CloseHandle(objectToWaitOn); } catch (AssertionFailureException ex) { ex.signalableObjectType = SignalableObjectType::ManualResetEvent; ex.waitableObjectType = WaitableObjectType::AutoResetEvent; ex.waitResult = waitResult; throw ex; } } PALTEST(threading_SignalObjectAndWait_paltest_signalobjectandwaittest, "threading/SignalObjectAndWait/paltest_signalobjectandwaittest") { if (PAL_Initialize(argc, argv) != 0) { return FAIL; } int testReturnCode = PASS; try { Run(); } catch (AssertionFailureException ex) { printf( "SignalObjectAndWaitTest - Assertion failure (line %d, signalable object type %d, waitable object type %d, wait result 0x%x, error code %u): 
'%s'\n", ex.lineNumber, ex.signalableObjectType, ex.waitableObjectType, ex.waitResult, ex.errorCode, ex.expression); fflush(stdout); testReturnCode = FAIL; } PAL_TerminateEx(testReturnCode); return testReturnCode; }
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/native/corehost/fx_definition.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "deps_format.h" #include "fx_definition.h" #include "fx_ver.h" #include "pal.h" #include "runtime_config.h" fx_definition_t::fx_definition_t() { } fx_definition_t::fx_definition_t( const pal::string_t& name, const pal::string_t& dir, const pal::string_t& requested_version, const pal::string_t& found_version) : m_name(name) , m_dir(dir) , m_requested_version(requested_version) , m_found_version(found_version) { } void fx_definition_t::parse_runtime_config( const pal::string_t& path, const pal::string_t& dev_path, const runtime_config_t::settings_t& override_settings ) { m_runtime_config.parse(path, dev_path, override_settings); } void fx_definition_t::parse_deps() { m_deps.parse(false, m_deps_file); } void fx_definition_t::parse_deps(const deps_json_t::rid_fallback_graph_t& graph) { m_deps.parse(true, m_deps_file, graph); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "deps_format.h" #include "fx_definition.h" #include "fx_ver.h" #include "pal.h" #include "runtime_config.h" fx_definition_t::fx_definition_t() { } fx_definition_t::fx_definition_t( const pal::string_t& name, const pal::string_t& dir, const pal::string_t& requested_version, const pal::string_t& found_version) : m_name(name) , m_dir(dir) , m_requested_version(requested_version) , m_found_version(found_version) { } void fx_definition_t::parse_runtime_config( const pal::string_t& path, const pal::string_t& dev_path, const runtime_config_t::settings_t& override_settings ) { m_runtime_config.parse(path, dev_path, override_settings); } void fx_definition_t::parse_deps() { m_deps.parse(false, m_deps_file); } void fx_definition_t::parse_deps(const deps_json_t::rid_fallback_graph_t& graph) { m_deps.parse(true, m_deps_file, graph); }
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/tests/profiler/native/multiple/multiple.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "multiple.h" #include <thread> #define MAX_PROFILERS 3 using std::thread; std::atomic<int> MultiplyLoaded::_exceptionThrownSeenCount(0); std::atomic<int> MultiplyLoaded::_detachCount(0); std::atomic<int> MultiplyLoaded::_failures(0); GUID MultiplyLoaded::GetClsid() { // {BFA8EF13-E144-49B9-B95C-FC1C150C7651} GUID clsid = { 0xBFA8EF13, 0xE144, 0x49B9, { 0xB9, 0x5C, 0xFC, 0x1C, 0x15, 0x0C, 0x76, 0x51 } }; return clsid; } HRESULT MultiplyLoaded::InitializeCommon(IUnknown* pICorProfilerInfoUnk) { Profiler::Initialize(pICorProfilerInfoUnk); HRESULT hr = S_OK; printf("Setting exception mask\n"); if (FAILED(hr = pCorProfilerInfo->SetEventMask2(COR_PRF_MONITOR_EXCEPTIONS, 0))) { _failures++; printf("FAIL: ICorProfilerInfo::SetEventMask2() failed hr=0x%x", hr); return hr; } return S_OK; } HRESULT MultiplyLoaded::Initialize(IUnknown* pICorProfilerInfoUnk) { return InitializeCommon(pICorProfilerInfoUnk); } HRESULT MultiplyLoaded::InitializeForAttach(IUnknown* pICorProfilerInfoUnk, void* pvClientData, UINT cbClientData) { return InitializeCommon(pICorProfilerInfoUnk); } HRESULT MultiplyLoaded::LoadAsNotificationOnly(BOOL *pbNotificationOnly) { *pbNotificationOnly = TRUE; return S_OK; } HRESULT MultiplyLoaded::ProfilerDetachSucceeded() { ++_detachCount; printf("ProfilerDetachSucceeded _detachCount=%d\n", _detachCount.load()); if (_detachCount == (MAX_PROFILERS - 1) && _exceptionThrownSeenCount >= (MAX_PROFILERS - 1) && _failures == 0) { printf("PROFILER TEST PASSES\n"); NotifyManagedCodeViaCallback(pCorProfilerInfo); } return S_OK; } HRESULT MultiplyLoaded::ExceptionThrown(ObjectID thrownObjectId) { int seen = _exceptionThrownSeenCount++; printf("MultiplyLoaded::ExceptionThrown, number seen = %d\n", seen); thread detachThread([&]() { printf("Requesting detach!!\n"); HRESULT hr = pCorProfilerInfo->RequestProfilerDetach(0); printf("RequestProfilerDetach hr=0x%x\n", hr); }); detachThread.detach(); return S_OK; } HRESULT MultiplyLoaded::Shutdown() { Profiler::Shutdown(); fflush(stdout); return S_OK; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "multiple.h" #include <thread> #define MAX_PROFILERS 3 using std::thread; std::atomic<int> MultiplyLoaded::_exceptionThrownSeenCount(0); std::atomic<int> MultiplyLoaded::_detachCount(0); std::atomic<int> MultiplyLoaded::_failures(0); GUID MultiplyLoaded::GetClsid() { // {BFA8EF13-E144-49B9-B95C-FC1C150C7651} GUID clsid = { 0xBFA8EF13, 0xE144, 0x49B9, { 0xB9, 0x5C, 0xFC, 0x1C, 0x15, 0x0C, 0x76, 0x51 } }; return clsid; } HRESULT MultiplyLoaded::InitializeCommon(IUnknown* pICorProfilerInfoUnk) { Profiler::Initialize(pICorProfilerInfoUnk); HRESULT hr = S_OK; printf("Setting exception mask\n"); if (FAILED(hr = pCorProfilerInfo->SetEventMask2(COR_PRF_MONITOR_EXCEPTIONS, 0))) { _failures++; printf("FAIL: ICorProfilerInfo::SetEventMask2() failed hr=0x%x", hr); return hr; } return S_OK; } HRESULT MultiplyLoaded::Initialize(IUnknown* pICorProfilerInfoUnk) { return InitializeCommon(pICorProfilerInfoUnk); } HRESULT MultiplyLoaded::InitializeForAttach(IUnknown* pICorProfilerInfoUnk, void* pvClientData, UINT cbClientData) { return InitializeCommon(pICorProfilerInfoUnk); } HRESULT MultiplyLoaded::LoadAsNotificationOnly(BOOL *pbNotificationOnly) { *pbNotificationOnly = TRUE; return S_OK; } HRESULT MultiplyLoaded::ProfilerDetachSucceeded() { ++_detachCount; printf("ProfilerDetachSucceeded _detachCount=%d\n", _detachCount.load()); if (_detachCount == (MAX_PROFILERS - 1) && _exceptionThrownSeenCount >= (MAX_PROFILERS - 1) && _failures == 0) { printf("PROFILER TEST PASSES\n"); NotifyManagedCodeViaCallback(pCorProfilerInfo); } return S_OK; } HRESULT MultiplyLoaded::ExceptionThrown(ObjectID thrownObjectId) { int seen = _exceptionThrownSeenCount++; printf("MultiplyLoaded::ExceptionThrown, number seen = %d\n", seen); thread detachThread([&]() { printf("Requesting detach!!\n"); HRESULT hr = pCorProfilerInfo->RequestProfilerDetach(0); printf("RequestProfilerDetach hr=0x%x\n", hr); }); detachThread.detach(); return S_OK; } HRESULT MultiplyLoaded::Shutdown() { Profiler::Shutdown(); fflush(stdout); return S_OK; }
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low, but whenever the pattern is hit it is an improvement. I chose to open the PR even though the value is low so that, even if it is closed, anyone who ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
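As a purely illustrative sketch of the shape of the check such a lowering performs (toy node types below; these are not the JIT's real GenTree APIs), the idea is to match an AND whose one operand is a NEG of the same value as the other operand, in either operand order:

```cpp
// Toy expression tree; the Node/Op names are hypothetical and only illustrate the matching logic.
#include <cstdio>

enum class Op { LclVar, Neg, And };

struct Node {
    Op    op;
    int   lclNum; // meaningful only when op == Op::LclVar
    Node* op1;
    Node* op2;
};

// True when 'a' and 'b' are uses of the same local variable.
static bool SameLocal(const Node* a, const Node* b)
{
    return a->op == Op::LclVar && b->op == Op::LclVar && a->lclNum == b->lclNum;
}

// Recognize AND(x, NEG(x)) in either operand order -- the pattern that can become one blsi.
static bool IsLowestSetBitPattern(const Node* n)
{
    if (n->op != Op::And)
        return false;
    const Node* a = n->op1;
    const Node* b = n->op2;
    if (b->op == Op::Neg && SameLocal(a, b->op1))
        return true;
    if (a->op == Op::Neg && SameLocal(b, a->op1))
        return true;
    return false;
}

int main()
{
    Node x1{Op::LclVar, 0, nullptr, nullptr};
    Node x2{Op::LclVar, 0, nullptr, nullptr};
    Node neg{Op::Neg, -1, &x2, nullptr};
    Node andNode{Op::And, -1, &x1, &neg};
    printf("matches: %d\n", IsLowestSetBitPattern(&andNode)); // prints 1
    return 0;
}
```

A real implementation would additionally need to consider things like operand side effects and BMI1 hardware support before substituting the intrinsic node.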
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low, but whenever the pattern is hit it is an improvement. I chose to open the PR even though the value is low so that, even if it is closed, anyone who ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/vm/peassembly.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // -------------------------------------------------------------------------------- // PEAssembly.cpp // // -------------------------------------------------------------------------------- #include "common.h" #include "peassembly.h" #include "eecontract.h" #include "eeconfig.h" #include "eventtrace.h" #include "dbginterface.h" #include "peimagelayout.inl" #include "dlwrap.h" #include "invokeutil.h" #include "strongnameinternal.h" #include "../binder/inc/applicationcontext.hpp" #include "assemblybinderutil.h" #include "../binder/inc/assemblybindercommon.hpp" #include "sha1.h" #ifndef DACCESS_COMPILE //----------------------------------------------------------------------------------------------------- // Catch attempts to load x64 assemblies on x86, etc. //----------------------------------------------------------------------------------------------------- static void ValidatePEFileMachineType(PEAssembly *pPEAssembly) { STANDARD_VM_CONTRACT; if (pPEAssembly->IsDynamic()) return; // PEFiles for ReflectionEmit assemblies don't cache the machine type. DWORD peKind; DWORD actualMachineType; pPEAssembly->GetPEKindAndMachine(&peKind, &actualMachineType); if (actualMachineType == IMAGE_FILE_MACHINE_I386 && ((peKind & (peILonly | pe32BitRequired)) == peILonly)) return; // Image is marked CPU-agnostic. if (actualMachineType != IMAGE_FILE_MACHINE_NATIVE && actualMachineType != IMAGE_FILE_MACHINE_NATIVE_NI) { #ifdef TARGET_AMD64 // v4.0 64-bit compatibility workaround. The 64-bit v4.0 CLR's Reflection.Load(byte[]) api does not detect cpu-matches. We should consider fixing that in // the next SxS release. In the meantime, this bypass will retain compat for 64-bit v4.0 CLR for target platforms that existed at the time. // // Though this bypass kicks in for all Load() flavors, the other Load() flavors did detect cpu-matches through various other code paths that still exist. // Or to put it another way, this #ifdef makes the (4.5 only) ValidatePEFileMachineType() a NOP for x64, hence preserving 4.0 compatibility. if (actualMachineType == IMAGE_FILE_MACHINE_I386 || actualMachineType == IMAGE_FILE_MACHINE_IA64) return; #endif // BIT64_ // Image has required machine that doesn't match the CLR. StackSString name; pPEAssembly->GetDisplayName(name); COMPlusThrow(kBadImageFormatException, IDS_CLASSLOAD_WRONGCPU, name.GetUnicode()); } return; // If we got here, all is good. } void PEAssembly::EnsureLoaded() { CONTRACT_VOID { INSTANCE_CHECK; POSTCONDITION(IsLoaded()); STANDARD_VM_CHECK; } CONTRACT_END; if (IsDynamic()) RETURN; // Ensure that loaded layout is available. PEImageLayout* pLayout = GetPEImage()->GetOrCreateLayout(PEImageLayout::LAYOUT_LOADED); if (pLayout == NULL) { EEFileLoadException::Throw(this, COR_E_BADIMAGEFORMAT, NULL); } // Catch attempts to load x64 assemblies on x86, etc. ValidatePEFileMachineType(this); #if !defined(TARGET_64BIT) if (!GetPEImage()->Has32BitNTHeaders()) { // Tried to load 64-bit assembly on 32-bit platform. 
EEFileLoadException::Throw(this, COR_E_BADIMAGEFORMAT, NULL); } #endif RETURN; } // ------------------------------------------------------------ // Identity // ------------------------------------------------------------ BOOL PEAssembly::Equals(PEAssembly *pPEAssembly) { CONTRACTL { INSTANCE_CHECK; PRECONDITION(CheckPointer(pPEAssembly)); GC_NOTRIGGER; NOTHROW; CANNOT_TAKE_LOCK; MODE_ANY; } CONTRACTL_END; // Same object is equal if (pPEAssembly == this) return TRUE; // Different host assemblies cannot be equal unless they are associated with the same host binder // It's ok if only one has a host binder because multiple threads can race to load the same assembly // and that may cause temporary candidate PEAssembly objects that never get bound to a host assembly // because another thread beats it; the losing thread will pick up the PEAssembly in the cache. if (pPEAssembly->HasHostAssembly() && this->HasHostAssembly()) { AssemblyBinder* otherBinder = pPEAssembly->GetHostAssembly()->GetBinder(); AssemblyBinder* thisBinder = this->GetHostAssembly()->GetBinder(); if (otherBinder != thisBinder || otherBinder == NULL) return FALSE; } // Same image is equal if (m_PEImage != NULL && pPEAssembly->m_PEImage != NULL && m_PEImage->Equals(pPEAssembly->m_PEImage)) return TRUE; return FALSE; } BOOL PEAssembly::Equals(PEImage *pImage) { CONTRACTL { INSTANCE_CHECK; PRECONDITION(CheckPointer(pImage)); GC_NOTRIGGER; NOTHROW; MODE_ANY; } CONTRACTL_END; // Same image ==> equal if (pImage == m_PEImage) return TRUE; // Equal image ==> equal if (m_PEImage != NULL && m_PEImage->Equals(pImage)) return TRUE; return FALSE; } // ------------------------------------------------------------ // Descriptive strings // ------------------------------------------------------------ void PEAssembly::GetPathOrCodeBase(SString &result) { CONTRACTL { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; if (m_PEImage != NULL && !m_PEImage->GetPath().IsEmpty()) { result.Set(m_PEImage->GetPath()); } else { GetCodeBase(result); } } // ------------------------------------------------------------ // Metadata access // ------------------------------------------------------------ PTR_CVOID PEAssembly::GetMetadata(COUNT_T *pSize) { CONTRACT(PTR_CVOID) { INSTANCE_CHECK; POSTCONDITION(CheckPointer(pSize, NULL_OK)); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); THROWS; GC_TRIGGERS; MODE_ANY; SUPPORTS_DAC; } CONTRACT_END; if (IsDynamic() || !GetPEImage()->HasNTHeaders() || !GetPEImage()->HasCorHeader()) { if (pSize != NULL) *pSize = 0; RETURN NULL; } else { RETURN GetPEImage()->GetMetadata(pSize); } } #endif // #ifndef DACCESS_COMPILE PTR_CVOID PEAssembly::GetLoadedMetadata(COUNT_T *pSize) { CONTRACT(PTR_CVOID) { INSTANCE_CHECK; POSTCONDITION(CheckPointer(pSize, NULL_OK)); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); NOTHROW; GC_NOTRIGGER; MODE_ANY; SUPPORTS_DAC; } CONTRACT_END; if (!HasLoadedPEImage() || !GetLoadedLayout()->HasNTHeaders() || !GetLoadedLayout()->HasCorHeader()) { if (pSize != NULL) *pSize = 0; RETURN NULL; } else { RETURN GetLoadedLayout()->GetMetadata(pSize); } } TADDR PEAssembly::GetIL(RVA il) { CONTRACT(TADDR) { INSTANCE_CHECK; PRECONDITION(il != 0); PRECONDITION(!IsDynamic()); #ifndef DACCESS_COMPILE PRECONDITION(HasLoadedPEImage()); #endif POSTCONDITION(RETVAL != NULL); THROWS; GC_NOTRIGGER; MODE_ANY; SUPPORTS_DAC; } CONTRACT_END; PEImageLayout *image = NULL; image = GetLoadedLayout(); #ifndef DACCESS_COMPILE // Verify that the IL blob is valid before giving it out if 
(!image->CheckILMethod(il)) COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL_RANGE); #endif RETURN image->GetRvaData(il); } #ifndef DACCESS_COMPILE void PEAssembly::OpenImporter() { CONTRACTL { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; // Make sure internal MD is in RW format. ConvertMDInternalToReadWrite(); IMetaDataImport2 *pIMDImport = NULL; IfFailThrow(GetMetaDataPublicInterfaceFromInternal((void*)GetMDImport(), IID_IMetaDataImport2, (void **)&pIMDImport)); // Atomically swap it into the field (release it if we lose the race) if (FastInterlockCompareExchangePointer(&m_pImporter, pIMDImport, NULL) != NULL) pIMDImport->Release(); } void PEAssembly::ConvertMDInternalToReadWrite() { CONTRACTL { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(EX_THROW(EEMessageException, (E_OUTOFMEMORY));); } CONTRACTL_END; IMDInternalImport *pOld; // Old (current RO) value of internal import. IMDInternalImport *pNew = NULL; // New (RW) value of internal import. // Take a local copy of *ppImport. This may be a pointer to an RO // or to an RW MDInternalXX. pOld = m_pMDImport; IMetaDataImport *pIMDImport = m_pImporter; if (pIMDImport != NULL) { HRESULT hr = GetMetaDataInternalInterfaceFromPublic(pIMDImport, IID_IMDInternalImport, (void **)&pNew); if (FAILED(hr)) { EX_THROW(EEMessageException, (hr)); } if (pNew == pOld) { pNew->Release(); return; } } else { // If an RO, convert to an RW, return S_OK. If already RW, no conversion // needed, return S_FALSE. HRESULT hr = ConvertMDInternalImport(pOld, &pNew); if (FAILED(hr)) { EX_THROW(EEMessageException, (hr)); } // If no conversion took place, don't change pointers. if (hr == S_FALSE) return; } // Swap the pointers in a thread safe manner. If the contents of *ppImport // equals pOld then no other thread got here first, and the old contents are // replaced with pNew. The old contents are returned. if (FastInterlockCompareExchangePointer(&m_pMDImport, pNew, pOld) == pOld) { //if the debugger queries, it will now see that we have RW metadata m_MDImportIsRW_Debugger_Use_Only = TRUE; // Swapped -- get the metadata to hang onto the old Internal import. HRESULT hr=m_pMDImport->SetUserContextData(pOld); _ASSERTE(SUCCEEDED(hr)||!"Leaking old MDImport"); IfFailThrow(hr); } else { // Some other thread finished first. Just free the results of this conversion. pNew->Release(); } } void PEAssembly::OpenMDImport() { CONTRACTL { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; if (m_pMDImport != NULL) return; if (!IsDynamic() && GetPEImage()->HasNTHeaders() && GetPEImage()->HasCorHeader()) { m_pMDImport=GetPEImage()->GetMDImport(); } else { ThrowHR(COR_E_BADIMAGEFORMAT); } _ASSERTE(m_pMDImport); m_pMDImport->AddRef(); } void PEAssembly::OpenEmitter() { CONTRACTL { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; // Make sure internal MD is in RW format. 
ConvertMDInternalToReadWrite(); IMetaDataEmit *pIMDEmit = NULL; IfFailThrow(GetMetaDataPublicInterfaceFromInternal((void*)GetMDImport(), IID_IMetaDataEmit, (void **)&pIMDEmit)); // Atomically swap it into the field (release it if we lose the race) if (FastInterlockCompareExchangePointer(&m_pEmitter, pIMDEmit, NULL) != NULL) pIMDEmit->Release(); } // ------------------------------------------------------------ // PE file access // ------------------------------------------------------------ // Note that most of these APIs are currently passed through // to the main image. However, in the near future they will // be rerouted to the native image in the prejitted case so // we can avoid using the original IL image. #endif //!DACCESS_COMPILE #ifndef DACCESS_COMPILE // ------------------------------------------------------------ // Resource access // ------------------------------------------------------------ void PEAssembly::GetEmbeddedResource(DWORD dwOffset, DWORD *cbResource, PBYTE *pbInMemoryResource) { CONTRACTL { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(ThrowOutOfMemory();); } CONTRACTL_END; PEImage* image = GetPEImage(); PEImageLayout* theImage = image->GetOrCreateLayout(PEImageLayout::LAYOUT_ANY); if (!theImage->CheckResource(dwOffset)) ThrowHR(COR_E_BADIMAGEFORMAT); COUNT_T size; const void *resource = theImage->GetResource(dwOffset, &size); *cbResource = size; *pbInMemoryResource = (PBYTE) resource; } // ------------------------------------------------------------ // File loading // ------------------------------------------------------------ PEAssembly* PEAssembly::LoadAssembly(mdAssemblyRef kAssemblyRef) { CONTRACT(PEAssembly *) { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; POSTCONDITION(CheckPointer(RETVAL)); INJECT_FAULT(COMPlusThrowOM();); } CONTRACT_END; IMDInternalImport* pImport = GetMDImport(); if (((TypeFromToken(kAssemblyRef) != mdtAssembly) && (TypeFromToken(kAssemblyRef) != mdtAssemblyRef)) || (!pImport->IsValidToken(kAssemblyRef))) { ThrowHR(COR_E_BADIMAGEFORMAT); } AssemblySpec spec; spec.InitializeSpec(kAssemblyRef, pImport, GetAppDomain()->FindAssembly(this)); RETURN GetAppDomain()->BindAssemblySpec(&spec, TRUE); } BOOL PEAssembly::GetResource(LPCSTR szName, DWORD *cbResource, PBYTE *pbInMemoryResource, DomainAssembly** pAssemblyRef, LPCSTR *szFileName, DWORD *dwLocation, BOOL fSkipRaiseResolveEvent, DomainAssembly* pDomainAssembly, AppDomain* pAppDomain) { CONTRACTL { INSTANCE_CHECK; THROWS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); WRAPPER(GC_TRIGGERS); } CONTRACTL_END; mdToken mdLinkRef; DWORD dwResourceFlags; DWORD dwOffset; mdManifestResource mdResource; Assembly* pAssembly = NULL; PEAssembly* pPEAssembly = NULL; IMDInternalImport* pImport = GetMDImport(); if (SUCCEEDED(pImport->FindManifestResourceByName(szName, &mdResource))) { pPEAssembly = this; IfFailThrow(pImport->GetManifestResourceProps( mdResource, NULL, //&szName, &mdLinkRef, &dwOffset, &dwResourceFlags)); } else { if (fSkipRaiseResolveEvent || pAppDomain == NULL) return FALSE; DomainAssembly* pParentAssembly = GetAppDomain()->FindAssembly(this); pAssembly = pAppDomain->RaiseResourceResolveEvent(pParentAssembly, szName); if (pAssembly == NULL) return FALSE; pDomainAssembly = pAssembly->GetDomainAssembly(); pPEAssembly = pDomainAssembly->GetPEAssembly(); if (FAILED(pAssembly->GetMDImport()->FindManifestResourceByName( szName, &mdResource))) { return FALSE; } if (dwLocation != 0) { if (pAssemblyRef != NULL) *pAssemblyRef = pDomainAssembly; *dwLocation = *dwLocation | 2; // 
ResourceLocation.containedInAnotherAssembly } IfFailThrow(pPEAssembly->GetMDImport()->GetManifestResourceProps( mdResource, NULL, //&szName, &mdLinkRef, &dwOffset, &dwResourceFlags)); } switch(TypeFromToken(mdLinkRef)) { case mdtAssemblyRef: { if (pDomainAssembly == NULL) return FALSE; AssemblySpec spec; spec.InitializeSpec(mdLinkRef, GetMDImport(), pDomainAssembly); pDomainAssembly = spec.LoadDomainAssembly(FILE_LOADED); if (dwLocation) { if (pAssemblyRef) *pAssemblyRef = pDomainAssembly; *dwLocation = *dwLocation | 2; // ResourceLocation.containedInAnotherAssembly } return pDomainAssembly->GetResource(szName, cbResource, pbInMemoryResource, pAssemblyRef, szFileName, dwLocation, fSkipRaiseResolveEvent); } case mdtFile: if (mdLinkRef == mdFileNil) { // The resource is embedded in the manifest file if (dwLocation) { *dwLocation = *dwLocation | 5; // ResourceLocation.embedded | // ResourceLocation.containedInManifestFile return TRUE; } pPEAssembly->GetEmbeddedResource(dwOffset, cbResource, pbInMemoryResource); return TRUE; } return FALSE; default: ThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN_IN_MANIFESTRES); } } void PEAssembly::GetPEKindAndMachine(DWORD* pdwKind, DWORD* pdwMachine) { WRAPPER_NO_CONTRACT; _ASSERTE(pdwKind != NULL && pdwMachine != NULL); if (IsDynamic()) { *pdwKind = 0; *pdwMachine = 0; return; } GetPEImage()->GetPEKindAndMachine(pdwKind, pdwMachine); return; } ULONG PEAssembly::GetPEImageTimeDateStamp() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; return GetLoadedLayout()->GetTimeDateStamp(); } #ifndef DACCESS_COMPILE PEAssembly::PEAssembly( BINDER_SPACE::Assembly* pBindResultInfo, IMetaDataEmit* pEmit, BOOL isSystem, PEImage * pPEImage /*= NULL*/, BINDER_SPACE::Assembly * pHostAssembly /*= NULL*/) { CONTRACTL { CONSTRUCTOR_CHECK; PRECONDITION(CheckPointer(pEmit, NULL_OK)); PRECONDITION(pBindResultInfo == NULL || pPEImage == NULL); STANDARD_VM_CHECK; } CONTRACTL_END; #if _DEBUG m_pDebugName = NULL; #endif m_PEImage = NULL; m_MDImportIsRW_Debugger_Use_Only = FALSE; m_pMDImport = NULL; m_pImporter = NULL; m_pEmitter = NULL; m_refCount = 1; m_isSystem = isSystem; m_pHostAssembly = nullptr; m_pFallbackBinder = nullptr; pPEImage = pBindResultInfo ? pBindResultInfo->GetPEImage() : pPEImage; if (pPEImage) { _ASSERTE(pPEImage->CheckUniqueInstance()); pPEImage->AddRef(); // We require an open layout for the file. // Most likely we have one already, just make sure we have one. pPEImage->GetOrCreateLayout(PEImageLayout::LAYOUT_ANY); m_PEImage = pPEImage; } // Open metadata eagerly to minimize failure windows if (pEmit == NULL) OpenMDImport(); //constructor, cannot race with anything else { IfFailThrow(GetMetaDataInternalInterfaceFromPublic(pEmit, IID_IMDInternalImport, (void **)&m_pMDImport)); m_pEmitter = pEmit; pEmit->AddRef(); m_MDImportIsRW_Debugger_Use_Only = TRUE; } // m_pMDImport can be external // Make sure this is an assembly if (!m_pMDImport->IsValidToken(TokenFromRid(1, mdtAssembly))) ThrowHR(COR_E_ASSEMBLYEXPECTED); // Verify name eagerly LPCUTF8 szName = GetSimpleName(); if (!*szName) { ThrowHR(COR_E_BADIMAGEFORMAT, BFA_EMPTY_ASSEMDEF_NAME); } // Set the host assembly and binding context as the AssemblySpec initialization // for CoreCLR will expect to have it set. 
if (pHostAssembly != nullptr) { m_pHostAssembly = clr::SafeAddRef(pHostAssembly); } if(pBindResultInfo != nullptr) { // Cannot have both pHostAssembly and a coreclr based bind _ASSERTE(pHostAssembly == nullptr); pBindResultInfo = clr::SafeAddRef(pBindResultInfo); m_pHostAssembly = pBindResultInfo; } #if _DEBUG GetPathOrCodeBase(m_debugName); m_debugName.Normalize(); m_pDebugName = m_debugName; #endif } #endif // !DACCESS_COMPILE PEAssembly *PEAssembly::Open( PEImage * pPEImageIL, BINDER_SPACE::Assembly * pHostAssembly) { STANDARD_VM_CONTRACT; PEAssembly * pPEAssembly = new PEAssembly( nullptr, // BindResult nullptr, // IMetaDataEmit FALSE, // isSystem pPEImageIL, pHostAssembly); return pPEAssembly; } PEAssembly::~PEAssembly() { CONTRACTL { DESTRUCTOR_CHECK; NOTHROW; GC_TRIGGERS; // Fusion uses crsts on AddRef/Release MODE_ANY; } CONTRACTL_END; GCX_PREEMP(); if (m_pImporter != NULL) { m_pImporter->Release(); m_pImporter = NULL; } if (m_pEmitter != NULL) { m_pEmitter->Release(); m_pEmitter = NULL; } if (m_pMDImport != NULL) { m_pMDImport->Release(); m_pMDImport = NULL; } if (m_PEImage != NULL) m_PEImage->Release(); if (m_pHostAssembly != NULL) m_pHostAssembly->Release(); } /* static */ PEAssembly *PEAssembly::OpenSystem() { STANDARD_VM_CONTRACT; PEAssembly *result = NULL; EX_TRY { result = DoOpenSystem(); } EX_HOOK { Exception *ex = GET_EXCEPTION(); // Rethrow non-transient exceptions as file load exceptions with proper // context if (!ex->IsTransient()) EEFileLoadException::Throw(SystemDomain::System()->BaseLibrary(), ex->GetHR(), ex); } EX_END_HOOK; return result; } /* static */ PEAssembly *PEAssembly::DoOpenSystem() { CONTRACT(PEAssembly *) { POSTCONDITION(CheckPointer(RETVAL)); STANDARD_VM_CHECK; } CONTRACT_END; ETWOnStartup (FusionBinding_V1, FusionBindingEnd_V1); ReleaseHolder<BINDER_SPACE::Assembly> pBoundAssembly; IfFailThrow(GetAppDomain()->GetDefaultBinder()->BindToSystem(&pBoundAssembly)); RETURN new PEAssembly(pBoundAssembly, NULL, TRUE); } PEAssembly* PEAssembly::Open(BINDER_SPACE::Assembly* pBindResult) { return new PEAssembly(pBindResult,NULL,/*isSystem*/ false); }; /* static */ PEAssembly *PEAssembly::Create(IMetaDataAssemblyEmit *pAssemblyEmit) { CONTRACT(PEAssembly *) { PRECONDITION(CheckPointer(pAssemblyEmit)); STANDARD_VM_CHECK; POSTCONDITION(CheckPointer(RETVAL)); } CONTRACT_END; // Set up the metadata pointers in the PEAssembly. (This is the only identity // we have.) SafeComHolder<IMetaDataEmit> pEmit; pAssemblyEmit->QueryInterface(IID_IMetaDataEmit, (void **)&pEmit); RETURN new PEAssembly(NULL, pEmit, FALSE); } #endif // #ifndef DACCESS_COMPILE #ifndef DACCESS_COMPILE // Supports implementation of the legacy Assembly.CodeBase property. // Returns false if the assembly was loaded from a bundle, true otherwise BOOL PEAssembly::GetCodeBase(SString &result) { CONTRACTL { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; PEImage* ilImage = GetPEImage(); if (ilImage != NULL && !ilImage->IsInBundle()) { // All other cases use the file path. 
result.Set(ilImage->GetPath()); if (!result.IsEmpty()) PathToUrl(result); return TRUE; } else { result.Set(SString::Empty()); return FALSE; } } /* static */ void PEAssembly::PathToUrl(SString &string) { CONTRACTL { PRECONDITION(PEImage::CheckCanonicalFullPath(string)); THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; SString::Iterator i = string.Begin(); #if !defined(TARGET_UNIX) if (i[0] == W('\\')) { // Network path string.Insert(i, SL("file://")); string.Skip(i, SL("file://")); } else { // Disk path string.Insert(i, SL("file:///")); string.Skip(i, SL("file:///")); } #else // Unix doesn't have a distinction between a network or a local path _ASSERTE( i[0] == W('\\') || i[0] == W('/')); SString sss(SString::Literal, W("file://")); string.Insert(i, sss); string.Skip(i, sss); #endif while (string.Find(i, W('\\'))) { string.Replace(i, W('/')); } } void PEAssembly::UrlToPath(SString &string) { CONTRACT_VOID { THROWS; GC_NOTRIGGER; } CONTRACT_END; SString::Iterator i = string.Begin(); SString sss2(SString::Literal, W("file://")); #if !defined(TARGET_UNIX) SString sss3(SString::Literal, W("file:///")); if (string.MatchCaseInsensitive(i, sss3)) string.Delete(i, 8); else #endif if (string.MatchCaseInsensitive(i, sss2)) string.Delete(i, 7); while (string.Find(i, W('/'))) { string.Replace(i, W('\\')); } RETURN; } BOOL PEAssembly::FindLastPathSeparator(const SString &path, SString::Iterator &i) { #ifdef TARGET_UNIX SString::Iterator slash = i; SString::Iterator backSlash = i; BOOL foundSlash = path.FindBack(slash, '/'); BOOL foundBackSlash = path.FindBack(backSlash, '\\'); if (!foundSlash && !foundBackSlash) return FALSE; else if (foundSlash && !foundBackSlash) i = slash; else if (!foundSlash && foundBackSlash) i = backSlash; else i = (backSlash > slash) ? backSlash : slash; return TRUE; #else return path.FindBack(i, '\\'); #endif //TARGET_UNIX } // ------------------------------------------------------------ // Metadata access // ------------------------------------------------------------ HRESULT PEAssembly::GetVersion(USHORT *pMajor, USHORT *pMinor, USHORT *pBuild, USHORT *pRevision) { CONTRACTL { INSTANCE_CHECK; PRECONDITION(CheckPointer(pMajor, NULL_OK)); PRECONDITION(CheckPointer(pMinor, NULL_OK)); PRECONDITION(CheckPointer(pBuild, NULL_OK)); PRECONDITION(CheckPointer(pRevision, NULL_OK)); NOTHROW; WRAPPER(GC_TRIGGERS); MODE_ANY; } CONTRACTL_END; _ASSERTE(GetMDImport()->IsValidToken(TokenFromRid(1, mdtAssembly))); HRESULT hr = S_OK;; AssemblyMetaDataInternal md; IfFailRet(GetMDImport()->GetAssemblyProps(TokenFromRid(1, mdtAssembly), NULL, NULL, NULL, NULL, &md, NULL)); if (pMajor != NULL) *pMajor = md.usMajorVersion; if (pMinor != NULL) *pMinor = md.usMinorVersion; if (pBuild != NULL) *pBuild = md.usBuildNumber; if (pRevision != NULL) *pRevision = md.usRevisionNumber; return S_OK; } #endif // #ifndef DACCESS_COMPILE #ifdef DACCESS_COMPILE void PEAssembly::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; DAC_ENUM_DTHIS(); EMEM_OUT(("MEM: %p PEAssembly\n", dac_cast<TADDR>(this))); #ifdef _DEBUG // Not a big deal if it's NULL or fails. m_debugName.EnumMemoryRegions(flags); #endif if (m_PEImage.IsValid()) { m_PEImage->EnumMemoryRegions(flags); } } #endif // #ifdef DACCESS_COMPILE //------------------------------------------------------------------------------- // Make best-case effort to obtain an image name for use in an error message. // // This routine must expect to be called before the this object is fully loaded. 
// It can return an empty if the name isn't available or the object isn't initialized // enough to get a name, but it mustn't crash. //------------------------------------------------------------------------------- LPCWSTR PEAssembly::GetPathForErrorMessages() { CONTRACTL { THROWS; GC_TRIGGERS; INJECT_FAULT(COMPlusThrowOM();); SUPPORTS_DAC_HOST_ONLY; } CONTRACTL_END if (!IsDynamic()) { return m_PEImage->GetPathForErrorMessages(); } else { return W(""); } } #ifdef DACCESS_COMPILE TADDR PEAssembly::GetMDInternalRWAddress() { if (!m_MDImportIsRW_Debugger_Use_Only) return 0; else { // This line of code is a bit scary, but it is correct for now at least... // 1) We are using 'm_pMDImport_Use_Accessor' directly, and not the accessor. The field is // named this way to prevent debugger code that wants a host implementation of IMDInternalImport // from accidentally trying to use this pointer. This pointer is a target pointer, not // a host pointer. However in this function we do want the target pointer, so the usage is // accurate. // 2) ASSUMPTION: We are assuming that the only valid implementation of RW metadata is // MDInternalRW. If that ever changes we would need some way to disambiguate, and // probably this entire code path would need to be redesigned. // 3) ASSUMPTION: We are assuming that no pointer adjustment is required to convert between // IMDInternalImport*, IMDInternalImportENC* and MDInternalRW*. Ideally I was hoping to do this with a // static_cast<> but the compiler complains that the ENC<->RW is an unrelated conversion. return (TADDR)m_pMDImport_UseAccessor; } } #endif // Returns the AssemblyBinder* instance associated with the PEAssembly PTR_AssemblyBinder PEAssembly::GetAssemblyBinder() { LIMITED_METHOD_CONTRACT; PTR_AssemblyBinder pBinder = NULL; BINDER_SPACE::Assembly* pHostAssembly = GetHostAssembly(); if (pHostAssembly) { pBinder = dac_cast<PTR_AssemblyBinder>(pHostAssembly->GetBinder()); } else { // If we do not have a host assembly, check if we are dealing with // a dynamically emitted assembly and if so, use its fallback load context // binder reference. if (IsDynamic()) { pBinder = GetFallbackBinder(); } } return pBinder; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // -------------------------------------------------------------------------------- // PEAssembly.cpp // // -------------------------------------------------------------------------------- #include "common.h" #include "peassembly.h" #include "eecontract.h" #include "eeconfig.h" #include "eventtrace.h" #include "dbginterface.h" #include "peimagelayout.inl" #include "dlwrap.h" #include "invokeutil.h" #include "strongnameinternal.h" #include "../binder/inc/applicationcontext.hpp" #include "assemblybinderutil.h" #include "../binder/inc/assemblybindercommon.hpp" #include "sha1.h" #ifndef DACCESS_COMPILE //----------------------------------------------------------------------------------------------------- // Catch attempts to load x64 assemblies on x86, etc. //----------------------------------------------------------------------------------------------------- static void ValidatePEFileMachineType(PEAssembly *pPEAssembly) { STANDARD_VM_CONTRACT; if (pPEAssembly->IsDynamic()) return; // PEFiles for ReflectionEmit assemblies don't cache the machine type. DWORD peKind; DWORD actualMachineType; pPEAssembly->GetPEKindAndMachine(&peKind, &actualMachineType); if (actualMachineType == IMAGE_FILE_MACHINE_I386 && ((peKind & (peILonly | pe32BitRequired)) == peILonly)) return; // Image is marked CPU-agnostic. if (actualMachineType != IMAGE_FILE_MACHINE_NATIVE && actualMachineType != IMAGE_FILE_MACHINE_NATIVE_NI) { #ifdef TARGET_AMD64 // v4.0 64-bit compatibility workaround. The 64-bit v4.0 CLR's Reflection.Load(byte[]) api does not detect cpu-matches. We should consider fixing that in // the next SxS release. In the meantime, this bypass will retain compat for 64-bit v4.0 CLR for target platforms that existed at the time. // // Though this bypass kicks in for all Load() flavors, the other Load() flavors did detect cpu-matches through various other code paths that still exist. // Or to put it another way, this #ifdef makes the (4.5 only) ValidatePEFileMachineType() a NOP for x64, hence preserving 4.0 compatibility. if (actualMachineType == IMAGE_FILE_MACHINE_I386 || actualMachineType == IMAGE_FILE_MACHINE_IA64) return; #endif // BIT64_ // Image has required machine that doesn't match the CLR. StackSString name; pPEAssembly->GetDisplayName(name); COMPlusThrow(kBadImageFormatException, IDS_CLASSLOAD_WRONGCPU, name.GetUnicode()); } return; // If we got here, all is good. } void PEAssembly::EnsureLoaded() { CONTRACT_VOID { INSTANCE_CHECK; POSTCONDITION(IsLoaded()); STANDARD_VM_CHECK; } CONTRACT_END; if (IsDynamic()) RETURN; // Ensure that loaded layout is available. PEImageLayout* pLayout = GetPEImage()->GetOrCreateLayout(PEImageLayout::LAYOUT_LOADED); if (pLayout == NULL) { EEFileLoadException::Throw(this, COR_E_BADIMAGEFORMAT, NULL); } // Catch attempts to load x64 assemblies on x86, etc. ValidatePEFileMachineType(this); #if !defined(TARGET_64BIT) if (!GetPEImage()->Has32BitNTHeaders()) { // Tried to load 64-bit assembly on 32-bit platform. 
EEFileLoadException::Throw(this, COR_E_BADIMAGEFORMAT, NULL); } #endif RETURN; } // ------------------------------------------------------------ // Identity // ------------------------------------------------------------ BOOL PEAssembly::Equals(PEAssembly *pPEAssembly) { CONTRACTL { INSTANCE_CHECK; PRECONDITION(CheckPointer(pPEAssembly)); GC_NOTRIGGER; NOTHROW; CANNOT_TAKE_LOCK; MODE_ANY; } CONTRACTL_END; // Same object is equal if (pPEAssembly == this) return TRUE; // Different host assemblies cannot be equal unless they are associated with the same host binder // It's ok if only one has a host binder because multiple threads can race to load the same assembly // and that may cause temporary candidate PEAssembly objects that never get bound to a host assembly // because another thread beats it; the losing thread will pick up the PEAssembly in the cache. if (pPEAssembly->HasHostAssembly() && this->HasHostAssembly()) { AssemblyBinder* otherBinder = pPEAssembly->GetHostAssembly()->GetBinder(); AssemblyBinder* thisBinder = this->GetHostAssembly()->GetBinder(); if (otherBinder != thisBinder || otherBinder == NULL) return FALSE; } // Same image is equal if (m_PEImage != NULL && pPEAssembly->m_PEImage != NULL && m_PEImage->Equals(pPEAssembly->m_PEImage)) return TRUE; return FALSE; } BOOL PEAssembly::Equals(PEImage *pImage) { CONTRACTL { INSTANCE_CHECK; PRECONDITION(CheckPointer(pImage)); GC_NOTRIGGER; NOTHROW; MODE_ANY; } CONTRACTL_END; // Same image ==> equal if (pImage == m_PEImage) return TRUE; // Equal image ==> equal if (m_PEImage != NULL && m_PEImage->Equals(pImage)) return TRUE; return FALSE; } // ------------------------------------------------------------ // Descriptive strings // ------------------------------------------------------------ void PEAssembly::GetPathOrCodeBase(SString &result) { CONTRACTL { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; if (m_PEImage != NULL && !m_PEImage->GetPath().IsEmpty()) { result.Set(m_PEImage->GetPath()); } else { GetCodeBase(result); } } // ------------------------------------------------------------ // Metadata access // ------------------------------------------------------------ PTR_CVOID PEAssembly::GetMetadata(COUNT_T *pSize) { CONTRACT(PTR_CVOID) { INSTANCE_CHECK; POSTCONDITION(CheckPointer(pSize, NULL_OK)); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); THROWS; GC_TRIGGERS; MODE_ANY; SUPPORTS_DAC; } CONTRACT_END; if (IsDynamic() || !GetPEImage()->HasNTHeaders() || !GetPEImage()->HasCorHeader()) { if (pSize != NULL) *pSize = 0; RETURN NULL; } else { RETURN GetPEImage()->GetMetadata(pSize); } } #endif // #ifndef DACCESS_COMPILE PTR_CVOID PEAssembly::GetLoadedMetadata(COUNT_T *pSize) { CONTRACT(PTR_CVOID) { INSTANCE_CHECK; POSTCONDITION(CheckPointer(pSize, NULL_OK)); POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); NOTHROW; GC_NOTRIGGER; MODE_ANY; SUPPORTS_DAC; } CONTRACT_END; if (!HasLoadedPEImage() || !GetLoadedLayout()->HasNTHeaders() || !GetLoadedLayout()->HasCorHeader()) { if (pSize != NULL) *pSize = 0; RETURN NULL; } else { RETURN GetLoadedLayout()->GetMetadata(pSize); } } TADDR PEAssembly::GetIL(RVA il) { CONTRACT(TADDR) { INSTANCE_CHECK; PRECONDITION(il != 0); PRECONDITION(!IsDynamic()); #ifndef DACCESS_COMPILE PRECONDITION(HasLoadedPEImage()); #endif POSTCONDITION(RETVAL != NULL); THROWS; GC_NOTRIGGER; MODE_ANY; SUPPORTS_DAC; } CONTRACT_END; PEImageLayout *image = NULL; image = GetLoadedLayout(); #ifndef DACCESS_COMPILE // Verify that the IL blob is valid before giving it out if 
(!image->CheckILMethod(il)) COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL_RANGE); #endif RETURN image->GetRvaData(il); } #ifndef DACCESS_COMPILE void PEAssembly::OpenImporter() { CONTRACTL { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; // Make sure internal MD is in RW format. ConvertMDInternalToReadWrite(); IMetaDataImport2 *pIMDImport = NULL; IfFailThrow(GetMetaDataPublicInterfaceFromInternal((void*)GetMDImport(), IID_IMetaDataImport2, (void **)&pIMDImport)); // Atomically swap it into the field (release it if we lose the race) if (FastInterlockCompareExchangePointer(&m_pImporter, pIMDImport, NULL) != NULL) pIMDImport->Release(); } void PEAssembly::ConvertMDInternalToReadWrite() { CONTRACTL { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(EX_THROW(EEMessageException, (E_OUTOFMEMORY));); } CONTRACTL_END; IMDInternalImport *pOld; // Old (current RO) value of internal import. IMDInternalImport *pNew = NULL; // New (RW) value of internal import. // Take a local copy of *ppImport. This may be a pointer to an RO // or to an RW MDInternalXX. pOld = m_pMDImport; IMetaDataImport *pIMDImport = m_pImporter; if (pIMDImport != NULL) { HRESULT hr = GetMetaDataInternalInterfaceFromPublic(pIMDImport, IID_IMDInternalImport, (void **)&pNew); if (FAILED(hr)) { EX_THROW(EEMessageException, (hr)); } if (pNew == pOld) { pNew->Release(); return; } } else { // If an RO, convert to an RW, return S_OK. If already RW, no conversion // needed, return S_FALSE. HRESULT hr = ConvertMDInternalImport(pOld, &pNew); if (FAILED(hr)) { EX_THROW(EEMessageException, (hr)); } // If no conversion took place, don't change pointers. if (hr == S_FALSE) return; } // Swap the pointers in a thread safe manner. If the contents of *ppImport // equals pOld then no other thread got here first, and the old contents are // replaced with pNew. The old contents are returned. if (FastInterlockCompareExchangePointer(&m_pMDImport, pNew, pOld) == pOld) { //if the debugger queries, it will now see that we have RW metadata m_MDImportIsRW_Debugger_Use_Only = TRUE; // Swapped -- get the metadata to hang onto the old Internal import. HRESULT hr=m_pMDImport->SetUserContextData(pOld); _ASSERTE(SUCCEEDED(hr)||!"Leaking old MDImport"); IfFailThrow(hr); } else { // Some other thread finished first. Just free the results of this conversion. pNew->Release(); } } void PEAssembly::OpenMDImport() { CONTRACTL { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; if (m_pMDImport != NULL) return; if (!IsDynamic() && GetPEImage()->HasNTHeaders() && GetPEImage()->HasCorHeader()) { m_pMDImport=GetPEImage()->GetMDImport(); } else { ThrowHR(COR_E_BADIMAGEFORMAT); } _ASSERTE(m_pMDImport); m_pMDImport->AddRef(); } void PEAssembly::OpenEmitter() { CONTRACTL { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; // Make sure internal MD is in RW format. 
ConvertMDInternalToReadWrite(); IMetaDataEmit *pIMDEmit = NULL; IfFailThrow(GetMetaDataPublicInterfaceFromInternal((void*)GetMDImport(), IID_IMetaDataEmit, (void **)&pIMDEmit)); // Atomically swap it into the field (release it if we lose the race) if (FastInterlockCompareExchangePointer(&m_pEmitter, pIMDEmit, NULL) != NULL) pIMDEmit->Release(); } // ------------------------------------------------------------ // PE file access // ------------------------------------------------------------ // Note that most of these APIs are currently passed through // to the main image. However, in the near future they will // be rerouted to the native image in the prejitted case so // we can avoid using the original IL image. #endif //!DACCESS_COMPILE #ifndef DACCESS_COMPILE // ------------------------------------------------------------ // Resource access // ------------------------------------------------------------ void PEAssembly::GetEmbeddedResource(DWORD dwOffset, DWORD *cbResource, PBYTE *pbInMemoryResource) { CONTRACTL { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(ThrowOutOfMemory();); } CONTRACTL_END; PEImage* image = GetPEImage(); PEImageLayout* theImage = image->GetOrCreateLayout(PEImageLayout::LAYOUT_ANY); if (!theImage->CheckResource(dwOffset)) ThrowHR(COR_E_BADIMAGEFORMAT); COUNT_T size; const void *resource = theImage->GetResource(dwOffset, &size); *cbResource = size; *pbInMemoryResource = (PBYTE) resource; } // ------------------------------------------------------------ // File loading // ------------------------------------------------------------ PEAssembly* PEAssembly::LoadAssembly(mdAssemblyRef kAssemblyRef) { CONTRACT(PEAssembly *) { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; POSTCONDITION(CheckPointer(RETVAL)); INJECT_FAULT(COMPlusThrowOM();); } CONTRACT_END; IMDInternalImport* pImport = GetMDImport(); if (((TypeFromToken(kAssemblyRef) != mdtAssembly) && (TypeFromToken(kAssemblyRef) != mdtAssemblyRef)) || (!pImport->IsValidToken(kAssemblyRef))) { ThrowHR(COR_E_BADIMAGEFORMAT); } AssemblySpec spec; spec.InitializeSpec(kAssemblyRef, pImport, GetAppDomain()->FindAssembly(this)); RETURN GetAppDomain()->BindAssemblySpec(&spec, TRUE); } BOOL PEAssembly::GetResource(LPCSTR szName, DWORD *cbResource, PBYTE *pbInMemoryResource, DomainAssembly** pAssemblyRef, LPCSTR *szFileName, DWORD *dwLocation, BOOL fSkipRaiseResolveEvent, DomainAssembly* pDomainAssembly, AppDomain* pAppDomain) { CONTRACTL { INSTANCE_CHECK; THROWS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); WRAPPER(GC_TRIGGERS); } CONTRACTL_END; mdToken mdLinkRef; DWORD dwResourceFlags; DWORD dwOffset; mdManifestResource mdResource; Assembly* pAssembly = NULL; PEAssembly* pPEAssembly = NULL; IMDInternalImport* pImport = GetMDImport(); if (SUCCEEDED(pImport->FindManifestResourceByName(szName, &mdResource))) { pPEAssembly = this; IfFailThrow(pImport->GetManifestResourceProps( mdResource, NULL, //&szName, &mdLinkRef, &dwOffset, &dwResourceFlags)); } else { if (fSkipRaiseResolveEvent || pAppDomain == NULL) return FALSE; DomainAssembly* pParentAssembly = GetAppDomain()->FindAssembly(this); pAssembly = pAppDomain->RaiseResourceResolveEvent(pParentAssembly, szName); if (pAssembly == NULL) return FALSE; pDomainAssembly = pAssembly->GetDomainAssembly(); pPEAssembly = pDomainAssembly->GetPEAssembly(); if (FAILED(pAssembly->GetMDImport()->FindManifestResourceByName( szName, &mdResource))) { return FALSE; } if (dwLocation != 0) { if (pAssemblyRef != NULL) *pAssemblyRef = pDomainAssembly; *dwLocation = *dwLocation | 2; // 
ResourceLocation.containedInAnotherAssembly } IfFailThrow(pPEAssembly->GetMDImport()->GetManifestResourceProps( mdResource, NULL, //&szName, &mdLinkRef, &dwOffset, &dwResourceFlags)); } switch(TypeFromToken(mdLinkRef)) { case mdtAssemblyRef: { if (pDomainAssembly == NULL) return FALSE; AssemblySpec spec; spec.InitializeSpec(mdLinkRef, GetMDImport(), pDomainAssembly); pDomainAssembly = spec.LoadDomainAssembly(FILE_LOADED); if (dwLocation) { if (pAssemblyRef) *pAssemblyRef = pDomainAssembly; *dwLocation = *dwLocation | 2; // ResourceLocation.containedInAnotherAssembly } return pDomainAssembly->GetResource(szName, cbResource, pbInMemoryResource, pAssemblyRef, szFileName, dwLocation, fSkipRaiseResolveEvent); } case mdtFile: if (mdLinkRef == mdFileNil) { // The resource is embedded in the manifest file if (dwLocation) { *dwLocation = *dwLocation | 5; // ResourceLocation.embedded | // ResourceLocation.containedInManifestFile return TRUE; } pPEAssembly->GetEmbeddedResource(dwOffset, cbResource, pbInMemoryResource); return TRUE; } return FALSE; default: ThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN_IN_MANIFESTRES); } } void PEAssembly::GetPEKindAndMachine(DWORD* pdwKind, DWORD* pdwMachine) { WRAPPER_NO_CONTRACT; _ASSERTE(pdwKind != NULL && pdwMachine != NULL); if (IsDynamic()) { *pdwKind = 0; *pdwMachine = 0; return; } GetPEImage()->GetPEKindAndMachine(pdwKind, pdwMachine); return; } ULONG PEAssembly::GetPEImageTimeDateStamp() { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; return GetLoadedLayout()->GetTimeDateStamp(); } #ifndef DACCESS_COMPILE PEAssembly::PEAssembly( BINDER_SPACE::Assembly* pBindResultInfo, IMetaDataEmit* pEmit, BOOL isSystem, PEImage * pPEImage /*= NULL*/, BINDER_SPACE::Assembly * pHostAssembly /*= NULL*/) { CONTRACTL { CONSTRUCTOR_CHECK; PRECONDITION(CheckPointer(pEmit, NULL_OK)); PRECONDITION(pBindResultInfo == NULL || pPEImage == NULL); STANDARD_VM_CHECK; } CONTRACTL_END; #if _DEBUG m_pDebugName = NULL; #endif m_PEImage = NULL; m_MDImportIsRW_Debugger_Use_Only = FALSE; m_pMDImport = NULL; m_pImporter = NULL; m_pEmitter = NULL; m_refCount = 1; m_isSystem = isSystem; m_pHostAssembly = nullptr; m_pFallbackBinder = nullptr; pPEImage = pBindResultInfo ? pBindResultInfo->GetPEImage() : pPEImage; if (pPEImage) { _ASSERTE(pPEImage->CheckUniqueInstance()); pPEImage->AddRef(); // We require an open layout for the file. // Most likely we have one already, just make sure we have one. pPEImage->GetOrCreateLayout(PEImageLayout::LAYOUT_ANY); m_PEImage = pPEImage; } // Open metadata eagerly to minimize failure windows if (pEmit == NULL) OpenMDImport(); //constructor, cannot race with anything else { IfFailThrow(GetMetaDataInternalInterfaceFromPublic(pEmit, IID_IMDInternalImport, (void **)&m_pMDImport)); m_pEmitter = pEmit; pEmit->AddRef(); m_MDImportIsRW_Debugger_Use_Only = TRUE; } // m_pMDImport can be external // Make sure this is an assembly if (!m_pMDImport->IsValidToken(TokenFromRid(1, mdtAssembly))) ThrowHR(COR_E_ASSEMBLYEXPECTED); // Verify name eagerly LPCUTF8 szName = GetSimpleName(); if (!*szName) { ThrowHR(COR_E_BADIMAGEFORMAT, BFA_EMPTY_ASSEMDEF_NAME); } // Set the host assembly and binding context as the AssemblySpec initialization // for CoreCLR will expect to have it set. 
if (pHostAssembly != nullptr) { m_pHostAssembly = clr::SafeAddRef(pHostAssembly); } if(pBindResultInfo != nullptr) { // Cannot have both pHostAssembly and a coreclr based bind _ASSERTE(pHostAssembly == nullptr); pBindResultInfo = clr::SafeAddRef(pBindResultInfo); m_pHostAssembly = pBindResultInfo; } #if _DEBUG GetPathOrCodeBase(m_debugName); m_debugName.Normalize(); m_pDebugName = m_debugName; #endif } #endif // !DACCESS_COMPILE PEAssembly *PEAssembly::Open( PEImage * pPEImageIL, BINDER_SPACE::Assembly * pHostAssembly) { STANDARD_VM_CONTRACT; PEAssembly * pPEAssembly = new PEAssembly( nullptr, // BindResult nullptr, // IMetaDataEmit FALSE, // isSystem pPEImageIL, pHostAssembly); return pPEAssembly; } PEAssembly::~PEAssembly() { CONTRACTL { DESTRUCTOR_CHECK; NOTHROW; GC_TRIGGERS; // Fusion uses crsts on AddRef/Release MODE_ANY; } CONTRACTL_END; GCX_PREEMP(); if (m_pImporter != NULL) { m_pImporter->Release(); m_pImporter = NULL; } if (m_pEmitter != NULL) { m_pEmitter->Release(); m_pEmitter = NULL; } if (m_pMDImport != NULL) { m_pMDImport->Release(); m_pMDImport = NULL; } if (m_PEImage != NULL) m_PEImage->Release(); if (m_pHostAssembly != NULL) m_pHostAssembly->Release(); } /* static */ PEAssembly *PEAssembly::OpenSystem() { STANDARD_VM_CONTRACT; PEAssembly *result = NULL; EX_TRY { result = DoOpenSystem(); } EX_HOOK { Exception *ex = GET_EXCEPTION(); // Rethrow non-transient exceptions as file load exceptions with proper // context if (!ex->IsTransient()) EEFileLoadException::Throw(SystemDomain::System()->BaseLibrary(), ex->GetHR(), ex); } EX_END_HOOK; return result; } /* static */ PEAssembly *PEAssembly::DoOpenSystem() { CONTRACT(PEAssembly *) { POSTCONDITION(CheckPointer(RETVAL)); STANDARD_VM_CHECK; } CONTRACT_END; ETWOnStartup (FusionBinding_V1, FusionBindingEnd_V1); ReleaseHolder<BINDER_SPACE::Assembly> pBoundAssembly; IfFailThrow(GetAppDomain()->GetDefaultBinder()->BindToSystem(&pBoundAssembly)); RETURN new PEAssembly(pBoundAssembly, NULL, TRUE); } PEAssembly* PEAssembly::Open(BINDER_SPACE::Assembly* pBindResult) { return new PEAssembly(pBindResult,NULL,/*isSystem*/ false); }; /* static */ PEAssembly *PEAssembly::Create(IMetaDataAssemblyEmit *pAssemblyEmit) { CONTRACT(PEAssembly *) { PRECONDITION(CheckPointer(pAssemblyEmit)); STANDARD_VM_CHECK; POSTCONDITION(CheckPointer(RETVAL)); } CONTRACT_END; // Set up the metadata pointers in the PEAssembly. (This is the only identity // we have.) SafeComHolder<IMetaDataEmit> pEmit; pAssemblyEmit->QueryInterface(IID_IMetaDataEmit, (void **)&pEmit); RETURN new PEAssembly(NULL, pEmit, FALSE); } #endif // #ifndef DACCESS_COMPILE #ifndef DACCESS_COMPILE // Supports implementation of the legacy Assembly.CodeBase property. // Returns false if the assembly was loaded from a bundle, true otherwise BOOL PEAssembly::GetCodeBase(SString &result) { CONTRACTL { INSTANCE_CHECK; THROWS; GC_TRIGGERS; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; PEImage* ilImage = GetPEImage(); if (ilImage != NULL && !ilImage->IsInBundle()) { // All other cases use the file path. 
result.Set(ilImage->GetPath()); if (!result.IsEmpty()) PathToUrl(result); return TRUE; } else { result.Set(SString::Empty()); return FALSE; } } /* static */ void PEAssembly::PathToUrl(SString &string) { CONTRACTL { PRECONDITION(PEImage::CheckCanonicalFullPath(string)); THROWS; GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; SString::Iterator i = string.Begin(); #if !defined(TARGET_UNIX) if (i[0] == W('\\')) { // Network path string.Insert(i, SL("file://")); string.Skip(i, SL("file://")); } else { // Disk path string.Insert(i, SL("file:///")); string.Skip(i, SL("file:///")); } #else // Unix doesn't have a distinction between a network or a local path _ASSERTE( i[0] == W('\\') || i[0] == W('/')); SString sss(SString::Literal, W("file://")); string.Insert(i, sss); string.Skip(i, sss); #endif while (string.Find(i, W('\\'))) { string.Replace(i, W('/')); } } void PEAssembly::UrlToPath(SString &string) { CONTRACT_VOID { THROWS; GC_NOTRIGGER; } CONTRACT_END; SString::Iterator i = string.Begin(); SString sss2(SString::Literal, W("file://")); #if !defined(TARGET_UNIX) SString sss3(SString::Literal, W("file:///")); if (string.MatchCaseInsensitive(i, sss3)) string.Delete(i, 8); else #endif if (string.MatchCaseInsensitive(i, sss2)) string.Delete(i, 7); while (string.Find(i, W('/'))) { string.Replace(i, W('\\')); } RETURN; } BOOL PEAssembly::FindLastPathSeparator(const SString &path, SString::Iterator &i) { #ifdef TARGET_UNIX SString::Iterator slash = i; SString::Iterator backSlash = i; BOOL foundSlash = path.FindBack(slash, '/'); BOOL foundBackSlash = path.FindBack(backSlash, '\\'); if (!foundSlash && !foundBackSlash) return FALSE; else if (foundSlash && !foundBackSlash) i = slash; else if (!foundSlash && foundBackSlash) i = backSlash; else i = (backSlash > slash) ? backSlash : slash; return TRUE; #else return path.FindBack(i, '\\'); #endif //TARGET_UNIX } // ------------------------------------------------------------ // Metadata access // ------------------------------------------------------------ HRESULT PEAssembly::GetVersion(USHORT *pMajor, USHORT *pMinor, USHORT *pBuild, USHORT *pRevision) { CONTRACTL { INSTANCE_CHECK; PRECONDITION(CheckPointer(pMajor, NULL_OK)); PRECONDITION(CheckPointer(pMinor, NULL_OK)); PRECONDITION(CheckPointer(pBuild, NULL_OK)); PRECONDITION(CheckPointer(pRevision, NULL_OK)); NOTHROW; WRAPPER(GC_TRIGGERS); MODE_ANY; } CONTRACTL_END; _ASSERTE(GetMDImport()->IsValidToken(TokenFromRid(1, mdtAssembly))); HRESULT hr = S_OK;; AssemblyMetaDataInternal md; IfFailRet(GetMDImport()->GetAssemblyProps(TokenFromRid(1, mdtAssembly), NULL, NULL, NULL, NULL, &md, NULL)); if (pMajor != NULL) *pMajor = md.usMajorVersion; if (pMinor != NULL) *pMinor = md.usMinorVersion; if (pBuild != NULL) *pBuild = md.usBuildNumber; if (pRevision != NULL) *pRevision = md.usRevisionNumber; return S_OK; } #endif // #ifndef DACCESS_COMPILE #ifdef DACCESS_COMPILE void PEAssembly::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; DAC_ENUM_DTHIS(); EMEM_OUT(("MEM: %p PEAssembly\n", dac_cast<TADDR>(this))); #ifdef _DEBUG // Not a big deal if it's NULL or fails. m_debugName.EnumMemoryRegions(flags); #endif if (m_PEImage.IsValid()) { m_PEImage->EnumMemoryRegions(flags); } } #endif // #ifdef DACCESS_COMPILE //------------------------------------------------------------------------------- // Make best-case effort to obtain an image name for use in an error message. // // This routine must expect to be called before the this object is fully loaded. 
// It can return an empty if the name isn't available or the object isn't initialized // enough to get a name, but it mustn't crash. //------------------------------------------------------------------------------- LPCWSTR PEAssembly::GetPathForErrorMessages() { CONTRACTL { THROWS; GC_TRIGGERS; INJECT_FAULT(COMPlusThrowOM();); SUPPORTS_DAC_HOST_ONLY; } CONTRACTL_END if (!IsDynamic()) { return m_PEImage->GetPathForErrorMessages(); } else { return W(""); } } #ifdef DACCESS_COMPILE TADDR PEAssembly::GetMDInternalRWAddress() { if (!m_MDImportIsRW_Debugger_Use_Only) return 0; else { // This line of code is a bit scary, but it is correct for now at least... // 1) We are using 'm_pMDImport_Use_Accessor' directly, and not the accessor. The field is // named this way to prevent debugger code that wants a host implementation of IMDInternalImport // from accidentally trying to use this pointer. This pointer is a target pointer, not // a host pointer. However in this function we do want the target pointer, so the usage is // accurate. // 2) ASSUMPTION: We are assuming that the only valid implementation of RW metadata is // MDInternalRW. If that ever changes we would need some way to disambiguate, and // probably this entire code path would need to be redesigned. // 3) ASSUMPTION: We are assuming that no pointer adjustment is required to convert between // IMDInternalImport*, IMDInternalImportENC* and MDInternalRW*. Ideally I was hoping to do this with a // static_cast<> but the compiler complains that the ENC<->RW is an unrelated conversion. return (TADDR)m_pMDImport_UseAccessor; } } #endif // Returns the AssemblyBinder* instance associated with the PEAssembly PTR_AssemblyBinder PEAssembly::GetAssemblyBinder() { LIMITED_METHOD_CONTRACT; PTR_AssemblyBinder pBinder = NULL; BINDER_SPACE::Assembly* pHostAssembly = GetHostAssembly(); if (pHostAssembly) { pBinder = dac_cast<PTR_AssemblyBinder>(pHostAssembly->GetBinder()); } else { // If we do not have a host assembly, check if we are dealing with // a dynamically emitted assembly and if so, use its fallback load context // binder reference. if (IsDynamic()) { pBinder = GetFallbackBinder(); } } return pBinder; }
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
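A quick illustration of the pattern being lowered (this sketch is an editorial addition, not part of the PR): in two's-complement arithmetic `x & -x` isolates the lowest set bit of `x`, which is exactly what BMI1's `blsi` computes in a single instruction. The C++ sketch below shows the portable idiom next to the `_blsi_u32` intrinsic from `<immintrin.h>`; the PR teaches the JIT to recognize the equivalent `AND(x, NEG(x))` tree and emit it through the ExtractLowestSetBit hardware intrinsic, which is precisely the `mov/neg/and` → `blsi` collapse visible in the asm diff above.

```cpp
// Illustration only: the lowest-set-bit idiom that the lowering recognizes.
#include <cstdint>
#include <cstdio>
#if defined(__BMI__)
#include <immintrin.h>   // _blsi_u32
#endif

// Portable form: unsigned wraparound negation, i.e. AND(x, NEG(x)).
static uint32_t lowest_set_bit(uint32_t x)
{
    return x & (0u - x);
}

int main()
{
    uint32_t x = 0b101100;                         // 44
    printf("portable: 0x%x\n", lowest_set_bit(x)); // 0x4
#if defined(__BMI__)
    printf("blsi:     0x%x\n", _blsi_u32(x));      // same value, one BLSI instruction
#endif
    return 0;
}
```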
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/pal/tests/palsuite/eventprovider/EnableEventLogging.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: EnableEventLogging.cpp ** ** Purpose: Fix linker issue on platforms where the PAL is built against ** version 2.4 of liblttng-ust-dev ** ** **===================================================================*/ #if defined(HOST_UNIX) // This is a wrapper method for LTTng. See https://github.com/dotnet/coreclr/pull/27273 for details. extern "C" bool XplatEventLoggerIsEnabled() { // As we are testing the lttng events here, enable them unconditionally. return true; } #endif // HOST_UNIX
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: EnableEventLogging.cpp ** ** Purpose: Fix linker issue on platforms where the PAL is built against ** version 2.4 of liblttng-ust-dev ** ** **===================================================================*/ #if defined(HOST_UNIX) // This is a wrapper method for LTTng. See https://github.com/dotnet/coreclr/pull/27273 for details. extern "C" bool XplatEventLoggerIsEnabled() { // As we are testing the lttng events here, enable them unconditionally. return true; } #endif // HOST_UNIX
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/native/libs/System.Security.Cryptography.Native.Apple/pal_keychain_macos.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_types.h" #include "pal_compiler.h" #include <Security/Security.h> /* Get a CFRetain()ed SecKeychainRef value for the keychain to which the keychain item belongs. The behavior of this function is undefined if `item` is not a CFTypeRef. For types that are not understood by this function to be keychain items an invalid parameter error is returned. Errors of the item having no keychain are suppressed, returning success (0) with *pKeychainOut set to NULL. For all other situations, see SecKeychainItemCopyKeychain documentation. */ PALEXPORT int32_t AppleCryptoNative_SecKeychainItemCopyKeychain(SecKeychainItemRef item, SecKeychainRef* pKeychainOut); /* Create a keychain at the specified location with a given (UTF-8 encoded) lock passphrase. Returns the result of SecKeychainCreate. Output: pKeychainOut: The SecKeychainRef created by this function */ PALEXPORT int32_t AppleCryptoNative_SecKeychainCreate(const char* pathName, uint32_t passphraseLength, const uint8_t* passphraseUtf8, SecKeychainRef* pKeychainOut); /* Delete a keychain, including the file on disk. Returns the result of SecKeychainDelete */ PALEXPORT int32_t AppleCryptoNative_SecKeychainDelete(SecKeychainRef keychain); /* Open the default keychain. This is usually login.keychain, but can be adjusted by the user. Returns the result of SecKeychainCopyDefault. Output: pKeyChainOut: Receives the SecKeychainRef for the default keychain. */ PALEXPORT int32_t AppleCryptoNative_SecKeychainCopyDefault(SecKeychainRef* pKeychainOut); /* Open the named keychain (full path to the file). Returns the result of SecKeychainOpen. Output: pKeychainOut: Receives the SecKeychainRef for the named keychain. */ PALEXPORT int32_t AppleCryptoNative_SecKeychainOpen(const char* pszKeychainPath, SecKeychainRef* pKeychainOut); /* Unlock an opened keychain with a given (UTF-8 encoded) lock passphrase. Returns the result of SecKeychainUnlock. */ PALEXPORT int32_t AppleCryptoNative_SecKeychainUnlock(SecKeychainRef keychain, uint32_t passphraseLength, const uint8_t* passphraseUtf8); /* Set a keychain to never (automatically) lock. Returns the result of SecKeychainSetSettings to a never-auto-lock policy. */ PALEXPORT int32_t AppleCryptoNative_SetKeychainNeverLock(SecKeychainRef keychain); /* Enumerate the certificate objects within the given keychain. Returns 1 on success (including "no certs found"), 0 on failure, any other value for invalid state. Output: pCertsOut: When the return value is not 1, NULL. Otherwise NULL on "no certs found", or a CFArrayRef for the matches (including a single match). pOSStatus: Receives the last OSStatus value. */ PALEXPORT int32_t AppleCryptoNative_SecKeychainEnumerateCerts(SecKeychainRef keychain, CFArrayRef* pCertsOut, int32_t* pOSStatus); /* Enumerate the certificate objects within the given keychain. Returns 1 on success (including "no certs found"), 0 on failure, any other value for invalid state. Note that any identity will also necessarily be returned as a certificate with no private key by SecKeychainEnumerateCerts. De-duplication of values is the responsibility of the caller. Output: pCertsOut: When the return value is not 1, NULL. Otherwise NULL on "no certs found", or a CFArrayRef for the matches (including a single match). pOSStatus: Receives the last OSStatus value. 
*/ PALEXPORT int32_t AppleCryptoNative_SecKeychainEnumerateIdentities(SecKeychainRef keychain, CFArrayRef* pIdentitiesOut, int32_t* pOSStatus); /* Add a certificate from the specified keychain. Returns 0 on failure -> see OSStatus 1 on success any other value is invalid Output: pOSStatus: Receives the last OSStatus value.. */ PALEXPORT int32_t AppleCryptoNative_X509StoreAddCertificate(CFTypeRef certOrIdentity, SecKeychainRef keychain, int32_t* pOSStatus); /* Remove a certificate from the specified keychain. Returns 0 on failure -> see OSStatus 1 on success (including no item to delete), 2 on blocking user trust modification, 3 on blocking system trust modification, 4 on deleting an existing certificate while in read only mode, any other value is invalid Output: pOSStatus: Receives the last OSStatus value.. */ PALEXPORT int32_t AppleCryptoNative_X509StoreRemoveCertificate(CFTypeRef certOrIdentity, SecKeychainRef keychain, uint8_t isReadOnlyMode, int32_t* pOSStatus);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_types.h" #include "pal_compiler.h" #include <Security/Security.h> /* Get a CFRetain()ed SecKeychainRef value for the keychain to which the keychain item belongs. The behavior of this function is undefined if `item` is not a CFTypeRef. For types that are not understood by this function to be keychain items an invalid parameter error is returned. Errors of the item having no keychain are suppressed, returning success (0) with *pKeychainOut set to NULL. For all other situations, see SecKeychainItemCopyKeychain documentation. */ PALEXPORT int32_t AppleCryptoNative_SecKeychainItemCopyKeychain(SecKeychainItemRef item, SecKeychainRef* pKeychainOut); /* Create a keychain at the specified location with a given (UTF-8 encoded) lock passphrase. Returns the result of SecKeychainCreate. Output: pKeychainOut: The SecKeychainRef created by this function */ PALEXPORT int32_t AppleCryptoNative_SecKeychainCreate(const char* pathName, uint32_t passphraseLength, const uint8_t* passphraseUtf8, SecKeychainRef* pKeychainOut); /* Delete a keychain, including the file on disk. Returns the result of SecKeychainDelete */ PALEXPORT int32_t AppleCryptoNative_SecKeychainDelete(SecKeychainRef keychain); /* Open the default keychain. This is usually login.keychain, but can be adjusted by the user. Returns the result of SecKeychainCopyDefault. Output: pKeyChainOut: Receives the SecKeychainRef for the default keychain. */ PALEXPORT int32_t AppleCryptoNative_SecKeychainCopyDefault(SecKeychainRef* pKeychainOut); /* Open the named keychain (full path to the file). Returns the result of SecKeychainOpen. Output: pKeychainOut: Receives the SecKeychainRef for the named keychain. */ PALEXPORT int32_t AppleCryptoNative_SecKeychainOpen(const char* pszKeychainPath, SecKeychainRef* pKeychainOut); /* Unlock an opened keychain with a given (UTF-8 encoded) lock passphrase. Returns the result of SecKeychainUnlock. */ PALEXPORT int32_t AppleCryptoNative_SecKeychainUnlock(SecKeychainRef keychain, uint32_t passphraseLength, const uint8_t* passphraseUtf8); /* Set a keychain to never (automatically) lock. Returns the result of SecKeychainSetSettings to a never-auto-lock policy. */ PALEXPORT int32_t AppleCryptoNative_SetKeychainNeverLock(SecKeychainRef keychain); /* Enumerate the certificate objects within the given keychain. Returns 1 on success (including "no certs found"), 0 on failure, any other value for invalid state. Output: pCertsOut: When the return value is not 1, NULL. Otherwise NULL on "no certs found", or a CFArrayRef for the matches (including a single match). pOSStatus: Receives the last OSStatus value. */ PALEXPORT int32_t AppleCryptoNative_SecKeychainEnumerateCerts(SecKeychainRef keychain, CFArrayRef* pCertsOut, int32_t* pOSStatus); /* Enumerate the certificate objects within the given keychain. Returns 1 on success (including "no certs found"), 0 on failure, any other value for invalid state. Note that any identity will also necessarily be returned as a certificate with no private key by SecKeychainEnumerateCerts. De-duplication of values is the responsibility of the caller. Output: pCertsOut: When the return value is not 1, NULL. Otherwise NULL on "no certs found", or a CFArrayRef for the matches (including a single match). pOSStatus: Receives the last OSStatus value. 
*/ PALEXPORT int32_t AppleCryptoNative_SecKeychainEnumerateIdentities(SecKeychainRef keychain, CFArrayRef* pIdentitiesOut, int32_t* pOSStatus); /* Add a certificate from the specified keychain. Returns 0 on failure -> see OSStatus 1 on success any other value is invalid Output: pOSStatus: Receives the last OSStatus value.. */ PALEXPORT int32_t AppleCryptoNative_X509StoreAddCertificate(CFTypeRef certOrIdentity, SecKeychainRef keychain, int32_t* pOSStatus); /* Remove a certificate from the specified keychain. Returns 0 on failure -> see OSStatus 1 on success (including no item to delete), 2 on blocking user trust modification, 3 on blocking system trust modification, 4 on deleting an existing certificate while in read only mode, any other value is invalid Output: pOSStatus: Receives the last OSStatus value.. */ PALEXPORT int32_t AppleCryptoNative_X509StoreRemoveCertificate(CFTypeRef certOrIdentity, SecKeychainRef keychain, uint8_t isReadOnlyMode, int32_t* pOSStatus);
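To make the contract above concrete, here is a hypothetical caller-side sketch (macOS only, an editorial addition rather than part of the header): it creates a throwaway keychain, keeps it unlocked, enumerates certificates, and deletes it again, using only the entry points declared above. Error handling is reduced to the documented return conventions (OSStatus-style int32_t for create/delete, 1-on-success for the enumerate call); releasing the returned CFArrayRef follows the usual CoreFoundation convention and is an assumption of this sketch.

```cpp
// Hypothetical usage sketch for the PAL keychain entry points declared above.
#include <cstdint>
#include <cstring>
#include "pal_keychain_macos.h"

static int32_t demo_keychain(const char* path)
{
    const char* passphrase = "test-passphrase";                 // UTF-8 lock passphrase
    SecKeychainRef keychain = nullptr;

    int32_t st = AppleCryptoNative_SecKeychainCreate(
        path, (uint32_t)strlen(passphrase), (const uint8_t*)passphrase, &keychain);
    if (st != 0)
        return st;

    // Keep the keychain from auto-locking while we work with it.
    (void)AppleCryptoNative_SetKeychainNeverLock(keychain);

    CFArrayRef certs = nullptr;
    int32_t osStatus = 0;
    if (AppleCryptoNative_SecKeychainEnumerateCerts(keychain, &certs, &osStatus) == 1 && certs != nullptr)
        CFRelease(certs);                                       // assumed: caller releases the returned array

    st = AppleCryptoNative_SecKeychainDelete(keychain);         // also removes the file on disk
    CFRelease(keychain);
    return st;
}
```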
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/native/corehost/ijwhost/amd64/bootstrap_thunk.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "bootstrap_thunk.h" #include "corhdr.h" namespace { // 49 BA 78 56 34 12 78 56 34 12 mov r10,1234567812345678h // 49 BB 34 12 34 12 34 12 34 12 mov r11,1234123412341234h // 41 FF E3 jmp r11 BYTE mov_r10_instruction[2] = {0x49, 0xBA}; BYTE mov_r11_instruction[2] = {0x49, 0xBB}; BYTE jmp_r11_instruction[3] = {0x41, 0xFF, 0xE3}; } //================================================================================= // Get thunk from the return address that the call instruction would have pushed bootstrap_thunk *bootstrap_thunk::get_thunk_from_cookie(std::uintptr_t cookie) { return (bootstrap_thunk*)cookie; } //================================================================================= // bootstrap_thunk *bootstrap_thunk::get_thunk_from_entrypoint(std::uintptr_t entryAddr) { return (bootstrap_thunk *) ((std::uintptr_t)entryAddr - offsetof(bootstrap_thunk, m_mov_r10)); } //================================================================================= // Returns the slot address of the vtable entry for this thunk std::uintptr_t *bootstrap_thunk::get_slot_address() { return m_slot; } //================================================================================= // Returns the pal::dll_t for this thunk's module pal::dll_t bootstrap_thunk::get_dll_handle() { return m_dll; } //================================================================================= // Returns the token of this thunk std::uint32_t bootstrap_thunk::get_token() { return m_token; } //================================================================================= std::uintptr_t bootstrap_thunk::get_entrypoint() { return (std::uintptr_t)&m_mov_r10[0]; } //================================================================================= // Initializes the thunk to point to the bootstrap helper that will load the // runtime and perform the real thunk initialization. // void bootstrap_thunk::initialize(std::uintptr_t pThunkInitFcn, pal::dll_t dll, std::uint32_t token, std::uintptr_t *pSlot) { // Initialize the jump thunk. memcpy(&m_mov_r10[0], &mov_r10_instruction[0], sizeof(mov_r10_instruction)); (*((void **)&m_val_r10[0])) = (void *)this; memcpy(&m_mov_r11[0], &mov_r11_instruction[0], sizeof(mov_r11_instruction)); (*((void **)&m_val_r11[0])) = (void *)pThunkInitFcn; memcpy(&m_jmp_r11[0], &jmp_r11_instruction[0], sizeof(jmp_r11_instruction)); // Fill out the rest of the info m_token = token; m_dll = dll; m_slot = pSlot; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "bootstrap_thunk.h" #include "corhdr.h" namespace { // 49 BA 78 56 34 12 78 56 34 12 mov r10,1234567812345678h // 49 BB 34 12 34 12 34 12 34 12 mov r11,1234123412341234h // 41 FF E3 jmp r11 BYTE mov_r10_instruction[2] = {0x49, 0xBA}; BYTE mov_r11_instruction[2] = {0x49, 0xBB}; BYTE jmp_r11_instruction[3] = {0x41, 0xFF, 0xE3}; } //================================================================================= // Get thunk from the return address that the call instruction would have pushed bootstrap_thunk *bootstrap_thunk::get_thunk_from_cookie(std::uintptr_t cookie) { return (bootstrap_thunk*)cookie; } //================================================================================= // bootstrap_thunk *bootstrap_thunk::get_thunk_from_entrypoint(std::uintptr_t entryAddr) { return (bootstrap_thunk *) ((std::uintptr_t)entryAddr - offsetof(bootstrap_thunk, m_mov_r10)); } //================================================================================= // Returns the slot address of the vtable entry for this thunk std::uintptr_t *bootstrap_thunk::get_slot_address() { return m_slot; } //================================================================================= // Returns the pal::dll_t for this thunk's module pal::dll_t bootstrap_thunk::get_dll_handle() { return m_dll; } //================================================================================= // Returns the token of this thunk std::uint32_t bootstrap_thunk::get_token() { return m_token; } //================================================================================= std::uintptr_t bootstrap_thunk::get_entrypoint() { return (std::uintptr_t)&m_mov_r10[0]; } //================================================================================= // Initializes the thunk to point to the bootstrap helper that will load the // runtime and perform the real thunk initialization. // void bootstrap_thunk::initialize(std::uintptr_t pThunkInitFcn, pal::dll_t dll, std::uint32_t token, std::uintptr_t *pSlot) { // Initialize the jump thunk. memcpy(&m_mov_r10[0], &mov_r10_instruction[0], sizeof(mov_r10_instruction)); (*((void **)&m_val_r10[0])) = (void *)this; memcpy(&m_mov_r11[0], &mov_r11_instruction[0], sizeof(mov_r11_instruction)); (*((void **)&m_val_r11[0])) = (void *)pThunkInitFcn; memcpy(&m_jmp_r11[0], &jmp_r11_instruction[0], sizeof(jmp_r11_instruction)); // Fill out the rest of the info m_token = token; m_dll = dll; m_slot = pSlot; }
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/native/corehost/bundle/info.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "trace.h" #include "info.h" #include "utils.h" using namespace bundle; // Global single-file bundle information, if any const info_t* info_t::the_app = nullptr; info_t::info_t(const pal::char_t* bundle_path, const pal::char_t* app_path, int64_t header_offset) : m_bundle_path(bundle_path) , m_bundle_size(0) , m_header_offset(header_offset) , m_header(0, 0, 0) { m_base_path = get_directory(m_bundle_path); // Single-file bundles currently only support deps/runtime config json files // named based on the app.dll. Any other name for these configuration files // mentioned via the command line are assumed to be actual files on disk. // // Supporting custom names for these config files is straightforward (with associated changes in bundler and SDK). // There is no known use-case for it yet, and the facility is TBD. m_deps_json = config_t(get_deps_from_app_binary(m_base_path, app_path)); m_runtimeconfig_json = config_t(get_runtime_config_path(m_base_path, get_filename_without_ext(app_path))); } StatusCode info_t::process_bundle(const pal::char_t* bundle_path, const pal::char_t* app_path, int64_t header_offset) { if (header_offset == 0) { // Not a single-file bundle. return StatusCode::Success; } static info_t info(bundle_path, app_path, header_offset); StatusCode status = info.process_header(); if (status != StatusCode::Success) { return status; } trace::info(_X("Single-File bundle details:")); trace::info(_X("DepsJson Offset:[%lx] Size[%lx]"), info.m_header.deps_json_location().offset, info.m_header.deps_json_location().size); trace::info(_X("RuntimeConfigJson Offset:[%lx] Size[%lx]"), info.m_header.runtimeconfig_json_location().offset, info.m_header.runtimeconfig_json_location().size); trace::info(_X(".net core 3 compatibility mode: [%s]"), info.m_header.is_netcoreapp3_compat_mode() ? _X("Yes") : _X("No")); the_app = &info; return StatusCode::Success; } StatusCode info_t::process_header() { try { const char* addr = map_bundle(); reader_t reader(addr, m_bundle_size, m_header_offset); m_offset_in_file = reader.offset_in_file(); m_header = header_t::read(reader); m_deps_json.set_location(&m_header.deps_json_location()); m_runtimeconfig_json.set_location(&m_header.runtimeconfig_json_location()); unmap_bundle(addr); return StatusCode::Success; } catch (StatusCode e) { return e; } } char* info_t::config_t::map(const pal::string_t& path, const location_t* &location) { assert(is_single_file_bundle()); const bundle::info_t* app = bundle::info_t::the_app; if (app->m_deps_json.matches(path)) { location = app->m_deps_json.m_location; } else if (app->m_runtimeconfig_json.matches(path)) { location = app->m_runtimeconfig_json.m_location; } else { return nullptr; } // When necessary to map the deps.json or runtimeconfig.json files, we map the whole single-file bundle, // and return the address at the appropriate offset. // This is because: // * The host is the only code that is currently running and trying to map the bundle. // * Files can only be memory mapped at page-aligned offsets, and in whole page units. // Therefore, mapping only portions of the bundle will involve align-down/round-up calculations, and associated offset adjustments. // We choose the simpler approach of rounding to the whole file // * There is no performance limitation due to a larger sized mapping, since we actually only read the pages with relevant contents. 
// * Files that are too large to be mapped (ex: that exhaust 32-bit virtual address space) are not supported. #ifdef _WIN32 // Since we can't use in-situ parsing on Windows, as JSON data is encoded in // UTF-8 and the host expects wide strings. // We do not need COW and read-only mapping will be enough. char* addr = (char*)pal::mmap_read(app->m_bundle_path); #else // _WIN32 char* addr = (char*)pal::mmap_copy_on_write(app->m_bundle_path); #endif // _WIN32 if (addr == nullptr) { trace::error(_X("Failure processing application bundle.")); trace::error(_X("Failed to map bundle file [%s]"), path.c_str()); } trace::info(_X("Mapped bundle for [%s]"), path.c_str()); return addr + location->offset + app->m_offset_in_file; } void info_t::config_t::unmap(const char* addr, const location_t* location) { // Adjust to the beginning of the bundle. const bundle::info_t* app = bundle::info_t::the_app; addr -= location->offset - app->m_offset_in_file; bundle::info_t::the_app->unmap_bundle(addr); } const char* info_t::map_bundle() { const void *addr = pal::mmap_read(m_bundle_path, &m_bundle_size); if (addr == nullptr) { trace::error(_X("Failure processing application bundle.")); trace::error(_X("Couldn't memory map the bundle file for reading.")); throw StatusCode::BundleExtractionIOError; } trace::info(_X("Mapped application bundle")); return (const char *)addr; } void info_t::unmap_bundle(const char* addr) const { if (!pal::munmap((void*)addr, m_bundle_size)) { trace::warning(_X("Failed to unmap bundle after extraction.")); } else { trace::info(_X("Unmapped application bundle")); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "trace.h" #include "info.h" #include "utils.h" using namespace bundle; // Global single-file bundle information, if any const info_t* info_t::the_app = nullptr; info_t::info_t(const pal::char_t* bundle_path, const pal::char_t* app_path, int64_t header_offset) : m_bundle_path(bundle_path) , m_bundle_size(0) , m_header_offset(header_offset) , m_header(0, 0, 0) { m_base_path = get_directory(m_bundle_path); // Single-file bundles currently only support deps/runtime config json files // named based on the app.dll. Any other name for these configuration files // mentioned via the command line are assumed to be actual files on disk. // // Supporting custom names for these config files is straightforward (with associated changes in bundler and SDK). // There is no known use-case for it yet, and the facility is TBD. m_deps_json = config_t(get_deps_from_app_binary(m_base_path, app_path)); m_runtimeconfig_json = config_t(get_runtime_config_path(m_base_path, get_filename_without_ext(app_path))); } StatusCode info_t::process_bundle(const pal::char_t* bundle_path, const pal::char_t* app_path, int64_t header_offset) { if (header_offset == 0) { // Not a single-file bundle. return StatusCode::Success; } static info_t info(bundle_path, app_path, header_offset); StatusCode status = info.process_header(); if (status != StatusCode::Success) { return status; } trace::info(_X("Single-File bundle details:")); trace::info(_X("DepsJson Offset:[%lx] Size[%lx]"), info.m_header.deps_json_location().offset, info.m_header.deps_json_location().size); trace::info(_X("RuntimeConfigJson Offset:[%lx] Size[%lx]"), info.m_header.runtimeconfig_json_location().offset, info.m_header.runtimeconfig_json_location().size); trace::info(_X(".net core 3 compatibility mode: [%s]"), info.m_header.is_netcoreapp3_compat_mode() ? _X("Yes") : _X("No")); the_app = &info; return StatusCode::Success; } StatusCode info_t::process_header() { try { const char* addr = map_bundle(); reader_t reader(addr, m_bundle_size, m_header_offset); m_offset_in_file = reader.offset_in_file(); m_header = header_t::read(reader); m_deps_json.set_location(&m_header.deps_json_location()); m_runtimeconfig_json.set_location(&m_header.runtimeconfig_json_location()); unmap_bundle(addr); return StatusCode::Success; } catch (StatusCode e) { return e; } } char* info_t::config_t::map(const pal::string_t& path, const location_t* &location) { assert(is_single_file_bundle()); const bundle::info_t* app = bundle::info_t::the_app; if (app->m_deps_json.matches(path)) { location = app->m_deps_json.m_location; } else if (app->m_runtimeconfig_json.matches(path)) { location = app->m_runtimeconfig_json.m_location; } else { return nullptr; } // When necessary to map the deps.json or runtimeconfig.json files, we map the whole single-file bundle, // and return the address at the appropriate offset. // This is because: // * The host is the only code that is currently running and trying to map the bundle. // * Files can only be memory mapped at page-aligned offsets, and in whole page units. // Therefore, mapping only portions of the bundle will involve align-down/round-up calculations, and associated offset adjustments. // We choose the simpler approach of rounding to the whole file // * There is no performance limitation due to a larger sized mapping, since we actually only read the pages with relevant contents. 
// * Files that are too large to be mapped (ex: that exhaust 32-bit virtual address space) are not supported. #ifdef _WIN32 // Since we can't use in-situ parsing on Windows, as JSON data is encoded in // UTF-8 and the host expects wide strings. // We do not need COW and read-only mapping will be enough. char* addr = (char*)pal::mmap_read(app->m_bundle_path); #else // _WIN32 char* addr = (char*)pal::mmap_copy_on_write(app->m_bundle_path); #endif // _WIN32 if (addr == nullptr) { trace::error(_X("Failure processing application bundle.")); trace::error(_X("Failed to map bundle file [%s]"), path.c_str()); } trace::info(_X("Mapped bundle for [%s]"), path.c_str()); return addr + location->offset + app->m_offset_in_file; } void info_t::config_t::unmap(const char* addr, const location_t* location) { // Adjust to the beginning of the bundle. const bundle::info_t* app = bundle::info_t::the_app; addr -= location->offset - app->m_offset_in_file; bundle::info_t::the_app->unmap_bundle(addr); } const char* info_t::map_bundle() { const void *addr = pal::mmap_read(m_bundle_path, &m_bundle_size); if (addr == nullptr) { trace::error(_X("Failure processing application bundle.")); trace::error(_X("Couldn't memory map the bundle file for reading.")); throw StatusCode::BundleExtractionIOError; } trace::info(_X("Mapped application bundle")); return (const char *)addr; } void info_t::unmap_bundle(const char* addr) const { if (!pal::munmap((void*)addr, m_bundle_size)) { trace::warning(_X("Failed to unmap bundle after extraction.")); } else { trace::info(_X("Unmapped application bundle")); } }
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/inc/ecmakey.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once // The byte values of the ECMA pseudo public key and its token. const BYTE g_rbNeutralPublicKey[] = { 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0 }; const BYTE g_rbNeutralPublicKeyToken[] = { 0xb7, 0x7a, 0x5c, 0x56, 0x19, 0x34, 0xe0, 0x89 };
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once // The byte values of the ECMA pseudo public key and its token. const BYTE g_rbNeutralPublicKey[] = { 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0 }; const BYTE g_rbNeutralPublicKeyToken[] = { 0xb7, 0x7a, 0x5c, 0x56, 0x19, 0x34, 0xe0, 0x89 };
-1
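The PR description in this record centers on the identity that AND(x, NEG(x)) isolates the lowest set bit, which BMI1's `blsi` computes in a single instruction. The sketch below is illustrative only (function name and sample value are made up); it shows the plain C++ equivalent of the pattern the lowering recognizes, not the JIT change itself.

```cpp
#include <cstdint>
#include <cstdio>

// AND(x, NEG(x)) keeps only the lowest set bit of x: two's-complement negation
// leaves the lowest 1 bit (and the zeros below it) unchanged and flips every
// bit above it, so the AND clears everything except that lowest 1.
static uint32_t LowestSetBit(uint32_t x)
{
    return x & (0u - x); // same shape as the AND(x, NEG(x)) tree in the PR
}

int main()
{
    printf("0x%x\n", LowestSetBit(0x2Cu)); // 0b101100 -> 0b000100, prints 0x4
    return 0;
}
```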
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/vm/perfmap.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // =========================================================================== // File: perfmap.cpp // #include "common.h" #if defined(FEATURE_PERFMAP) && !defined(DACCESS_COMPILE) #include <clrconfignocache.h> #include "perfmap.h" #include "perfinfo.h" #include "pal.h" // The code addresses are actually native image offsets during crossgen. Print // them as 32-bit numbers for consistent output when cross-targeting and to // make the output more compact. #define FMT_CODE_ADDR "%p" Volatile<bool> PerfMap::s_enabled = false; PerfMap * PerfMap::s_Current = nullptr; bool PerfMap::s_ShowOptimizationTiers = false; // Initialize the map for the process - called from EEStartupHelper. void PerfMap::Initialize() { LIMITED_METHOD_CONTRACT; // Only enable the map if requested. if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_PerfMapEnabled)) { // Get the current process id. int currentPid = GetCurrentProcessId(); // Create the map. s_Current = new PerfMap(currentPid); int signalNum = (int) CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_PerfMapIgnoreSignal); if (signalNum > 0) { PAL_IgnoreProfileSignal(signalNum); } if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_PerfMapShowOptimizationTiers) != 0) { s_ShowOptimizationTiers = true; } s_enabled = true; const char* jitdumpPath; char jitdumpPathBuffer[4096]; CLRConfigNoCache value = CLRConfigNoCache::Get("PerfMapJitDumpPath"); if (value.IsSet()) { jitdumpPath = value.AsString(); } else { GetTempPathA(sizeof(jitdumpPathBuffer) - 1, jitdumpPathBuffer); jitdumpPath = jitdumpPathBuffer; } PAL_PerfJitDump_Start(jitdumpPath); } } // Destroy the map for the process - called from EEShutdownHelper. void PerfMap::Destroy() { LIMITED_METHOD_CONTRACT; if (s_enabled) { s_enabled = false; // PAL_PerfJitDump_Finish is lock protected and can safely be called multiple times PAL_PerfJitDump_Finish(); } } // Construct a new map for the process. PerfMap::PerfMap(int pid) { LIMITED_METHOD_CONTRACT; // Initialize with no failures. m_ErrorEncountered = false; m_StubsMapped = 0; // Build the path to the map file on disk. WCHAR tempPath[MAX_LONGPATH+1]; if(!GetTempPathW(MAX_LONGPATH, tempPath)) { return; } SString path; path.Printf("%Sperf-%d.map", &tempPath, pid); // Open the map file for writing. OpenFile(path); m_PerfInfo = new PerfInfo(pid); } // Construct a new map without a specified file name. // Used for offline creation of NGEN map files. PerfMap::PerfMap() : m_FileStream(nullptr) , m_PerfInfo(nullptr) { LIMITED_METHOD_CONTRACT; // Initialize with no failures. m_ErrorEncountered = false; m_StubsMapped = 0; } // Clean-up resources. PerfMap::~PerfMap() { LIMITED_METHOD_CONTRACT; delete m_FileStream; m_FileStream = nullptr; delete m_PerfInfo; m_PerfInfo = nullptr; } // Open the specified destination map file. void PerfMap::OpenFile(SString& path) { STANDARD_VM_CONTRACT; // Open the file stream. m_FileStream = new (nothrow) CFileStream(); if(m_FileStream != nullptr) { HRESULT hr = m_FileStream->OpenForWrite(path.GetUnicode()); if(FAILED(hr)) { delete m_FileStream; m_FileStream = nullptr; } } } // Write a line to the map file. void PerfMap::WriteLine(SString& line) { STANDARD_VM_CONTRACT; EX_TRY { // Write the line. // The PAL already takes a lock when writing, so we don't need to do so here. 
StackScratchBuffer scratch; const char * strLine = line.GetANSI(scratch); ULONG inCount = line.GetCount(); ULONG outCount; m_FileStream->Write(strLine, inCount, &outCount); if (inCount != outCount) { // This will cause us to stop writing to the file. // The file will still remain open until shutdown so that we don't have to take a lock at this level when we touch the file stream. m_ErrorEncountered = true; } } EX_CATCH{} EX_END_CATCH(SwallowAllExceptions); } // Log a method to the map. void PerfMap::LogMethod(MethodDesc * pMethod, PCODE pCode, size_t codeSize, const char *optimizationTier) { CONTRACTL{ THROWS; GC_NOTRIGGER; MODE_PREEMPTIVE; PRECONDITION(pMethod != nullptr); PRECONDITION(pCode != nullptr); PRECONDITION(codeSize > 0); } CONTRACTL_END; if (m_FileStream == nullptr || m_ErrorEncountered) { // A failure occurred, do not log. return; } // Logging failures should not cause any exceptions to flow upstream. EX_TRY { // Get the full method signature. SString name; pMethod->GetFullMethodInfo(name); // Build the map file line. StackScratchBuffer scratch; if (optimizationTier != nullptr && s_ShowOptimizationTiers) { name.AppendPrintf("[%s]", optimizationTier); } SString line; line.Printf(FMT_CODE_ADDR " %x %s\n", pCode, codeSize, name.GetANSI(scratch)); // Write the line. WriteLine(line); PAL_PerfJitDump_LogMethod((void*)pCode, codeSize, name.GetANSI(scratch), nullptr, nullptr); } EX_CATCH{} EX_END_CATCH(SwallowAllExceptions); } void PerfMap::LogImageLoad(PEAssembly * pPEAssembly) { if (s_enabled) { s_Current->LogImage(pPEAssembly); } } // Log an image load to the map. void PerfMap::LogImage(PEAssembly * pPEAssembly) { CONTRACTL{ THROWS; GC_NOTRIGGER; MODE_PREEMPTIVE; PRECONDITION(pPEAssembly != nullptr); } CONTRACTL_END; if (m_FileStream == nullptr || m_ErrorEncountered) { // A failure occurred, do not log. return; } EX_TRY { WCHAR wszSignature[39]; GetNativeImageSignature(pPEAssembly, wszSignature, ARRAY_SIZE(wszSignature)); m_PerfInfo->LogImage(pPEAssembly, wszSignature); } EX_CATCH{} EX_END_CATCH(SwallowAllExceptions); } // Log a method to the map. void PerfMap::LogJITCompiledMethod(MethodDesc * pMethod, PCODE pCode, size_t codeSize, PrepareCodeConfig *pConfig) { LIMITED_METHOD_CONTRACT; if (!s_enabled) { return; } const char *optimizationTier = nullptr; if (s_ShowOptimizationTiers) { optimizationTier = PrepareCodeConfig::GetJitOptimizationTierStr(pConfig, pMethod); } s_Current->LogMethod(pMethod, pCode, codeSize, optimizationTier); } // Log a pre-compiled method to the perfmap. void PerfMap::LogPreCompiledMethod(MethodDesc * pMethod, PCODE pCode) { LIMITED_METHOD_CONTRACT; if (!s_enabled) { return; } // Get information about the NGEN'd method code. EECodeInfo codeInfo(pCode); _ASSERTE(codeInfo.IsValid()); IJitManager::MethodRegionInfo methodRegionInfo; codeInfo.GetMethodRegionInfo(&methodRegionInfo); // Logging failures should not cause any exceptions to flow upstream. EX_TRY { // Get the full method signature. SString name; pMethod->GetFullMethodInfo(name); StackScratchBuffer scratch; if (s_ShowOptimizationTiers) { name.AppendPrintf(W("[PreJIT]")); } // NGEN can split code between hot and cold sections which are separate in memory. // Emit an entry for each section if it is used. 
if (methodRegionInfo.hotSize > 0) { PAL_PerfJitDump_LogMethod((void*)methodRegionInfo.hotStartAddress, methodRegionInfo.hotSize, name.GetANSI(scratch), nullptr, nullptr); } if (methodRegionInfo.coldSize > 0) { if (s_ShowOptimizationTiers) { pMethod->GetFullMethodInfo(name); name.AppendPrintf(W("[PreJit-cold]")); } PAL_PerfJitDump_LogMethod((void*)methodRegionInfo.coldStartAddress, methodRegionInfo.coldSize, name.GetANSI(scratch), nullptr, nullptr); } } EX_CATCH{} EX_END_CATCH(SwallowAllExceptions); } // Log a set of stub to the map. void PerfMap::LogStubs(const char* stubType, const char* stubOwner, PCODE pCode, size_t codeSize) { LIMITED_METHOD_CONTRACT; if (!s_enabled || s_Current->m_FileStream == nullptr) { return; } // Logging failures should not cause any exceptions to flow upstream. EX_TRY { if(!stubOwner) { stubOwner = "?"; } if(!stubType) { stubType = "?"; } // Build the map file line. StackScratchBuffer scratch; SString name; name.Printf("stub<%d> %s<%s>", ++(s_Current->m_StubsMapped), stubType, stubOwner); SString line; line.Printf(FMT_CODE_ADDR " %x %s\n", pCode, codeSize, name.GetANSI(scratch)); // Write the line. s_Current->WriteLine(line); PAL_PerfJitDump_LogMethod((void*)pCode, codeSize, name.GetANSI(scratch), nullptr, nullptr); } EX_CATCH{} EX_END_CATCH(SwallowAllExceptions); } void PerfMap::GetNativeImageSignature(PEAssembly * pPEAssembly, WCHAR * pwszSig, unsigned int nSigSize) { CONTRACTL{ PRECONDITION(pPEAssembly != nullptr); PRECONDITION(pwszSig != nullptr); PRECONDITION(nSigSize >= 39); } CONTRACTL_END; // We use the MVID as the signature, since ready to run images // don't have a native image signature. GUID mvid; pPEAssembly->GetMVID(&mvid); if(!StringFromGUID2(mvid, pwszSig, nSigSize)) { pwszSig[0] = '\0'; } } // Create a new native image perf map. NativeImagePerfMap::NativeImagePerfMap(Assembly * pAssembly, BSTR pDestPath) : PerfMap() { STANDARD_VM_CONTRACT; // Generate perfmap path. // Get the assembly simple name. LPCUTF8 lpcSimpleName = pAssembly->GetSimpleName(); // Get the native image signature (GUID). // Used to ensure that we match symbols to the correct NGEN image. WCHAR wszSignature[39]; GetNativeImageSignature(pAssembly->GetPEAssembly(), wszSignature, ARRAY_SIZE(wszSignature)); // Build the path to the perfmap file, which consists of <inputpath><imagesimplename>.ni.<signature>.map. // Example: /tmp/System.Private.CoreLib.ni.{GUID}.map SString sDestPerfMapPath; sDestPerfMapPath.Printf("%S%s.ni.%S.map", pDestPath, lpcSimpleName, wszSignature); // Open the perf map file. OpenFile(sDestPerfMapPath); // Determine whether to emit RVAs or file offsets based on the specified configuration. m_EmitRVAs = true; CLRConfigStringHolder wszFormat(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_NativeImagePerfMapFormat)); if(wszFormat != NULL && (wcsncmp(wszFormat, strOFFSET, wcslen(strOFFSET)) == 0)) { m_EmitRVAs = false; } } // Log data to the perfmap for the specified module. void NativeImagePerfMap::LogDataForModule(Module * pModule) { STANDARD_VM_CONTRACT; PEImageLayout * pLoadedLayout = pModule->GetPEAssembly()->GetLoadedLayout(); _ASSERTE(pLoadedLayout != nullptr); ReadyToRunInfo::MethodIterator mi(pModule->GetReadyToRunInfo()); while (mi.Next()) { MethodDesc* hotDesc = mi.GetMethodDesc(); LogPreCompiledMethod(hotDesc, mi.GetMethodStartAddress(), pLoadedLayout, "ReadyToRun"); } } // Log a pre-compiled method to the perfmap. 
void NativeImagePerfMap::LogPreCompiledMethod(MethodDesc * pMethod, PCODE pCode, PEImageLayout *pLoadedLayout, const char *optimizationTier) { STANDARD_VM_CONTRACT; _ASSERTE(pLoadedLayout != nullptr); SIZE_T baseAddr = (SIZE_T)pLoadedLayout->GetBase(); // Get information about the NGEN'd method code. EECodeInfo codeInfo(pCode); _ASSERTE(codeInfo.IsValid()); IJitManager::MethodRegionInfo methodRegionInfo; codeInfo.GetMethodRegionInfo(&methodRegionInfo); // NGEN can split code between hot and cold sections which are separate in memory. // Emit an entry for each section if it is used. PCODE addr; if (methodRegionInfo.hotSize > 0) { addr = (PCODE)methodRegionInfo.hotStartAddress - baseAddr; if (!m_EmitRVAs) { addr = pLoadedLayout->RvaToOffset(addr); } LogMethod(pMethod, addr, methodRegionInfo.hotSize, optimizationTier); } if (methodRegionInfo.coldSize > 0) { addr = (PCODE)methodRegionInfo.coldStartAddress - baseAddr; if (!m_EmitRVAs) { addr = pLoadedLayout->RvaToOffset(addr); } LogMethod(pMethod, addr, methodRegionInfo.coldSize, optimizationTier); } } #endif // FEATURE_PERFMAP && !DACCESS_COMPILE
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // =========================================================================== // File: perfmap.cpp // #include "common.h" #if defined(FEATURE_PERFMAP) && !defined(DACCESS_COMPILE) #include <clrconfignocache.h> #include "perfmap.h" #include "perfinfo.h" #include "pal.h" // The code addresses are actually native image offsets during crossgen. Print // them as 32-bit numbers for consistent output when cross-targeting and to // make the output more compact. #define FMT_CODE_ADDR "%p" Volatile<bool> PerfMap::s_enabled = false; PerfMap * PerfMap::s_Current = nullptr; bool PerfMap::s_ShowOptimizationTiers = false; // Initialize the map for the process - called from EEStartupHelper. void PerfMap::Initialize() { LIMITED_METHOD_CONTRACT; // Only enable the map if requested. if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_PerfMapEnabled)) { // Get the current process id. int currentPid = GetCurrentProcessId(); // Create the map. s_Current = new PerfMap(currentPid); int signalNum = (int) CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_PerfMapIgnoreSignal); if (signalNum > 0) { PAL_IgnoreProfileSignal(signalNum); } if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_PerfMapShowOptimizationTiers) != 0) { s_ShowOptimizationTiers = true; } s_enabled = true; const char* jitdumpPath; char jitdumpPathBuffer[4096]; CLRConfigNoCache value = CLRConfigNoCache::Get("PerfMapJitDumpPath"); if (value.IsSet()) { jitdumpPath = value.AsString(); } else { GetTempPathA(sizeof(jitdumpPathBuffer) - 1, jitdumpPathBuffer); jitdumpPath = jitdumpPathBuffer; } PAL_PerfJitDump_Start(jitdumpPath); } } // Destroy the map for the process - called from EEShutdownHelper. void PerfMap::Destroy() { LIMITED_METHOD_CONTRACT; if (s_enabled) { s_enabled = false; // PAL_PerfJitDump_Finish is lock protected and can safely be called multiple times PAL_PerfJitDump_Finish(); } } // Construct a new map for the process. PerfMap::PerfMap(int pid) { LIMITED_METHOD_CONTRACT; // Initialize with no failures. m_ErrorEncountered = false; m_StubsMapped = 0; // Build the path to the map file on disk. WCHAR tempPath[MAX_LONGPATH+1]; if(!GetTempPathW(MAX_LONGPATH, tempPath)) { return; } SString path; path.Printf("%Sperf-%d.map", &tempPath, pid); // Open the map file for writing. OpenFile(path); m_PerfInfo = new PerfInfo(pid); } // Construct a new map without a specified file name. // Used for offline creation of NGEN map files. PerfMap::PerfMap() : m_FileStream(nullptr) , m_PerfInfo(nullptr) { LIMITED_METHOD_CONTRACT; // Initialize with no failures. m_ErrorEncountered = false; m_StubsMapped = 0; } // Clean-up resources. PerfMap::~PerfMap() { LIMITED_METHOD_CONTRACT; delete m_FileStream; m_FileStream = nullptr; delete m_PerfInfo; m_PerfInfo = nullptr; } // Open the specified destination map file. void PerfMap::OpenFile(SString& path) { STANDARD_VM_CONTRACT; // Open the file stream. m_FileStream = new (nothrow) CFileStream(); if(m_FileStream != nullptr) { HRESULT hr = m_FileStream->OpenForWrite(path.GetUnicode()); if(FAILED(hr)) { delete m_FileStream; m_FileStream = nullptr; } } } // Write a line to the map file. void PerfMap::WriteLine(SString& line) { STANDARD_VM_CONTRACT; EX_TRY { // Write the line. // The PAL already takes a lock when writing, so we don't need to do so here. 
StackScratchBuffer scratch; const char * strLine = line.GetANSI(scratch); ULONG inCount = line.GetCount(); ULONG outCount; m_FileStream->Write(strLine, inCount, &outCount); if (inCount != outCount) { // This will cause us to stop writing to the file. // The file will still remain open until shutdown so that we don't have to take a lock at this level when we touch the file stream. m_ErrorEncountered = true; } } EX_CATCH{} EX_END_CATCH(SwallowAllExceptions); } // Log a method to the map. void PerfMap::LogMethod(MethodDesc * pMethod, PCODE pCode, size_t codeSize, const char *optimizationTier) { CONTRACTL{ THROWS; GC_NOTRIGGER; MODE_PREEMPTIVE; PRECONDITION(pMethod != nullptr); PRECONDITION(pCode != nullptr); PRECONDITION(codeSize > 0); } CONTRACTL_END; if (m_FileStream == nullptr || m_ErrorEncountered) { // A failure occurred, do not log. return; } // Logging failures should not cause any exceptions to flow upstream. EX_TRY { // Get the full method signature. SString name; pMethod->GetFullMethodInfo(name); // Build the map file line. StackScratchBuffer scratch; if (optimizationTier != nullptr && s_ShowOptimizationTiers) { name.AppendPrintf("[%s]", optimizationTier); } SString line; line.Printf(FMT_CODE_ADDR " %x %s\n", pCode, codeSize, name.GetANSI(scratch)); // Write the line. WriteLine(line); PAL_PerfJitDump_LogMethod((void*)pCode, codeSize, name.GetANSI(scratch), nullptr, nullptr); } EX_CATCH{} EX_END_CATCH(SwallowAllExceptions); } void PerfMap::LogImageLoad(PEAssembly * pPEAssembly) { if (s_enabled) { s_Current->LogImage(pPEAssembly); } } // Log an image load to the map. void PerfMap::LogImage(PEAssembly * pPEAssembly) { CONTRACTL{ THROWS; GC_NOTRIGGER; MODE_PREEMPTIVE; PRECONDITION(pPEAssembly != nullptr); } CONTRACTL_END; if (m_FileStream == nullptr || m_ErrorEncountered) { // A failure occurred, do not log. return; } EX_TRY { WCHAR wszSignature[39]; GetNativeImageSignature(pPEAssembly, wszSignature, ARRAY_SIZE(wszSignature)); m_PerfInfo->LogImage(pPEAssembly, wszSignature); } EX_CATCH{} EX_END_CATCH(SwallowAllExceptions); } // Log a method to the map. void PerfMap::LogJITCompiledMethod(MethodDesc * pMethod, PCODE pCode, size_t codeSize, PrepareCodeConfig *pConfig) { LIMITED_METHOD_CONTRACT; if (!s_enabled) { return; } const char *optimizationTier = nullptr; if (s_ShowOptimizationTiers) { optimizationTier = PrepareCodeConfig::GetJitOptimizationTierStr(pConfig, pMethod); } s_Current->LogMethod(pMethod, pCode, codeSize, optimizationTier); } // Log a pre-compiled method to the perfmap. void PerfMap::LogPreCompiledMethod(MethodDesc * pMethod, PCODE pCode) { LIMITED_METHOD_CONTRACT; if (!s_enabled) { return; } // Get information about the NGEN'd method code. EECodeInfo codeInfo(pCode); _ASSERTE(codeInfo.IsValid()); IJitManager::MethodRegionInfo methodRegionInfo; codeInfo.GetMethodRegionInfo(&methodRegionInfo); // Logging failures should not cause any exceptions to flow upstream. EX_TRY { // Get the full method signature. SString name; pMethod->GetFullMethodInfo(name); StackScratchBuffer scratch; if (s_ShowOptimizationTiers) { name.AppendPrintf(W("[PreJIT]")); } // NGEN can split code between hot and cold sections which are separate in memory. // Emit an entry for each section if it is used. 
if (methodRegionInfo.hotSize > 0) { PAL_PerfJitDump_LogMethod((void*)methodRegionInfo.hotStartAddress, methodRegionInfo.hotSize, name.GetANSI(scratch), nullptr, nullptr); } if (methodRegionInfo.coldSize > 0) { if (s_ShowOptimizationTiers) { pMethod->GetFullMethodInfo(name); name.AppendPrintf(W("[PreJit-cold]")); } PAL_PerfJitDump_LogMethod((void*)methodRegionInfo.coldStartAddress, methodRegionInfo.coldSize, name.GetANSI(scratch), nullptr, nullptr); } } EX_CATCH{} EX_END_CATCH(SwallowAllExceptions); } // Log a set of stub to the map. void PerfMap::LogStubs(const char* stubType, const char* stubOwner, PCODE pCode, size_t codeSize) { LIMITED_METHOD_CONTRACT; if (!s_enabled || s_Current->m_FileStream == nullptr) { return; } // Logging failures should not cause any exceptions to flow upstream. EX_TRY { if(!stubOwner) { stubOwner = "?"; } if(!stubType) { stubType = "?"; } // Build the map file line. StackScratchBuffer scratch; SString name; name.Printf("stub<%d> %s<%s>", ++(s_Current->m_StubsMapped), stubType, stubOwner); SString line; line.Printf(FMT_CODE_ADDR " %x %s\n", pCode, codeSize, name.GetANSI(scratch)); // Write the line. s_Current->WriteLine(line); PAL_PerfJitDump_LogMethod((void*)pCode, codeSize, name.GetANSI(scratch), nullptr, nullptr); } EX_CATCH{} EX_END_CATCH(SwallowAllExceptions); } void PerfMap::GetNativeImageSignature(PEAssembly * pPEAssembly, WCHAR * pwszSig, unsigned int nSigSize) { CONTRACTL{ PRECONDITION(pPEAssembly != nullptr); PRECONDITION(pwszSig != nullptr); PRECONDITION(nSigSize >= 39); } CONTRACTL_END; // We use the MVID as the signature, since ready to run images // don't have a native image signature. GUID mvid; pPEAssembly->GetMVID(&mvid); if(!StringFromGUID2(mvid, pwszSig, nSigSize)) { pwszSig[0] = '\0'; } } // Create a new native image perf map. NativeImagePerfMap::NativeImagePerfMap(Assembly * pAssembly, BSTR pDestPath) : PerfMap() { STANDARD_VM_CONTRACT; // Generate perfmap path. // Get the assembly simple name. LPCUTF8 lpcSimpleName = pAssembly->GetSimpleName(); // Get the native image signature (GUID). // Used to ensure that we match symbols to the correct NGEN image. WCHAR wszSignature[39]; GetNativeImageSignature(pAssembly->GetPEAssembly(), wszSignature, ARRAY_SIZE(wszSignature)); // Build the path to the perfmap file, which consists of <inputpath><imagesimplename>.ni.<signature>.map. // Example: /tmp/System.Private.CoreLib.ni.{GUID}.map SString sDestPerfMapPath; sDestPerfMapPath.Printf("%S%s.ni.%S.map", pDestPath, lpcSimpleName, wszSignature); // Open the perf map file. OpenFile(sDestPerfMapPath); // Determine whether to emit RVAs or file offsets based on the specified configuration. m_EmitRVAs = true; CLRConfigStringHolder wszFormat(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_NativeImagePerfMapFormat)); if(wszFormat != NULL && (wcsncmp(wszFormat, strOFFSET, wcslen(strOFFSET)) == 0)) { m_EmitRVAs = false; } } // Log data to the perfmap for the specified module. void NativeImagePerfMap::LogDataForModule(Module * pModule) { STANDARD_VM_CONTRACT; PEImageLayout * pLoadedLayout = pModule->GetPEAssembly()->GetLoadedLayout(); _ASSERTE(pLoadedLayout != nullptr); ReadyToRunInfo::MethodIterator mi(pModule->GetReadyToRunInfo()); while (mi.Next()) { MethodDesc* hotDesc = mi.GetMethodDesc(); LogPreCompiledMethod(hotDesc, mi.GetMethodStartAddress(), pLoadedLayout, "ReadyToRun"); } } // Log a pre-compiled method to the perfmap. 
void NativeImagePerfMap::LogPreCompiledMethod(MethodDesc * pMethod, PCODE pCode, PEImageLayout *pLoadedLayout, const char *optimizationTier) { STANDARD_VM_CONTRACT; _ASSERTE(pLoadedLayout != nullptr); SIZE_T baseAddr = (SIZE_T)pLoadedLayout->GetBase(); // Get information about the NGEN'd method code. EECodeInfo codeInfo(pCode); _ASSERTE(codeInfo.IsValid()); IJitManager::MethodRegionInfo methodRegionInfo; codeInfo.GetMethodRegionInfo(&methodRegionInfo); // NGEN can split code between hot and cold sections which are separate in memory. // Emit an entry for each section if it is used. PCODE addr; if (methodRegionInfo.hotSize > 0) { addr = (PCODE)methodRegionInfo.hotStartAddress - baseAddr; if (!m_EmitRVAs) { addr = pLoadedLayout->RvaToOffset(addr); } LogMethod(pMethod, addr, methodRegionInfo.hotSize, optimizationTier); } if (methodRegionInfo.coldSize > 0) { addr = (PCODE)methodRegionInfo.coldStartAddress - baseAddr; if (!m_EmitRVAs) { addr = pLoadedLayout->RvaToOffset(addr); } LogMethod(pMethod, addr, methodRegionInfo.coldSize, optimizationTier); } } #endif // FEATURE_PERFMAP && !DACCESS_COMPILE
-1
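For reference, `PerfMap::LogMethod` in the record above formats each map entry as `<start address> <code size in hex> <name>`, with the optimization tier appended in brackets when enabled. The snippet below only illustrates that line shape with made-up address, size, and method name; it is not part of the runtime code.

```cpp
#include <cstdio>

int main()
{
    // Made-up sample values; the format mirrors the FMT_CODE_ADDR " %x %s\n"
    // line written by PerfMap::WriteLine for a jitted method.
    void*       code     = reinterpret_cast<void*>(0x7f3a12345670);
    unsigned    codeSize = 0x1a4;
    const char* name     = "System.String:GetHashCode()[Tier1]";
    printf("%p %x %s\n", code, codeSize, name);
    return 0;
}
```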
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/pal/tests/palsuite/c_runtime/tanhf/test1/test1.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================= ** ** Source: test1.c ** ** Purpose: Test to ensure that tanhf return the correct values ** ** Dependencies: PAL_Initialize ** PAL_Terminate ** Fail ** fabs ** **===========================================================================*/ #include <palsuite.h> // binary32 (float) has a machine epsilon of 2^-23 (approx. 1.19e-07). However, this // is slightly too accurate when writing tests meant to run against libm implementations // for various platforms. 2^-21 (approx. 4.76e-07) seems to be as accurate as we can get. // // The tests themselves will take PAL_EPSILON and adjust it according to the expected result // so that the delta used for comparison will compare the most significant digits and ignore // any digits that are outside the double precision range (6-9 digits). // For example, a test with an expect result in the format of 0.xxxxxxxxx will use PAL_EPSILON // for the variance, while an expected result in the format of 0.0xxxxxxxxx will use // PAL_EPSILON / 10 and and expected result in the format of x.xxxxxx will use PAL_EPSILON * 10. #define PAL_EPSILON 4.76837158e-07 #define PAL_NAN sqrtf(-1.0f) #define PAL_POSINF -logf(0.0f) #define PAL_NEGINF logf(0.0f) /** * Helper test structure */ struct test { float value; /* value to test the function with */ float expected; /* expected result */ float variance; /* maximum delta between the expected and actual result */ }; /** * tanhf_test1_validate * * test validation function */ void __cdecl tanhf_test1_validate(float value, float expected, float variance) { float result = tanhf(value); /* * The test is valid when the difference between result * and expected is less than or equal to variance */ float delta = fabsf(result - expected); if (delta > variance) { Fail("tanhf(%g) returned %10.9g when it should have returned %10.9g", value, result, expected); } } /** * tanhf_test1_validate * * test validation function for values returning NaN */ void __cdecl tanhf_test1_validate_isnan(float value) { float result = tanhf(value); if (!_isnanf(result)) { Fail("tanhf(%g) returned %10.9g when it should have returned %10.9g", value, result, PAL_NAN); } } /** * main * * executable entry point */ PALTEST(c_runtime_tanhf_test1_paltest_tanhf_test1, "c_runtime/tanhf/test1/paltest_tanhf_test1") { struct test tests[] = { /* value expected variance */ { 0, 0, PAL_EPSILON }, { 0.318309886f, 0.307977913f, PAL_EPSILON }, // value: 1 / pi { 0.434294482f, 0.408904012f, PAL_EPSILON }, // value: log10f(e) { 0.636619772f, 0.562593600f, PAL_EPSILON }, // value: 2 / pi { 0.693147181f, 0.6f, PAL_EPSILON }, // value: ln(2) { 0.707106781f, 0.608859365f, PAL_EPSILON }, // value: 1 / sqrtf(2) { 0.785398163f, 0.655794203f, PAL_EPSILON }, // value: pi / 4 { 1, 0.761594156f, PAL_EPSILON }, { 1.12837917f, 0.810463806f, PAL_EPSILON }, // value: 2 / sqrtf(pi) { 1.41421356f, 0.888385562f, PAL_EPSILON }, // value: sqrtf(2) { 1.44269504f, 0.894238946f, PAL_EPSILON }, // value: logf2(e) { 1.57079633f, 0.917152336f, PAL_EPSILON }, // value: pi / 2 { 2.30258509f, 0.980198020f, PAL_EPSILON }, // value: ln(10) { 2.71828183f, 0.991328916f, PAL_EPSILON }, // value: e { 3.14159265f, 0.996272076f, PAL_EPSILON }, // value: pi { PAL_POSINF, 1, PAL_EPSILON * 10 } }; /* PAL initialization */ if (PAL_Initialize(argc, argv) != 0) { return FAIL; } for (int i = 0; i < 
(sizeof(tests) / sizeof(struct test)); i++) { tanhf_test1_validate( tests[i].value, tests[i].expected, tests[i].variance); tanhf_test1_validate(-tests[i].value, -tests[i].expected, tests[i].variance); } tanhf_test1_validate_isnan(PAL_NAN); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================= ** ** Source: test1.c ** ** Purpose: Test to ensure that tanhf return the correct values ** ** Dependencies: PAL_Initialize ** PAL_Terminate ** Fail ** fabs ** **===========================================================================*/ #include <palsuite.h> // binary32 (float) has a machine epsilon of 2^-23 (approx. 1.19e-07). However, this // is slightly too accurate when writing tests meant to run against libm implementations // for various platforms. 2^-21 (approx. 4.76e-07) seems to be as accurate as we can get. // // The tests themselves will take PAL_EPSILON and adjust it according to the expected result // so that the delta used for comparison will compare the most significant digits and ignore // any digits that are outside the double precision range (6-9 digits). // For example, a test with an expect result in the format of 0.xxxxxxxxx will use PAL_EPSILON // for the variance, while an expected result in the format of 0.0xxxxxxxxx will use // PAL_EPSILON / 10 and and expected result in the format of x.xxxxxx will use PAL_EPSILON * 10. #define PAL_EPSILON 4.76837158e-07 #define PAL_NAN sqrtf(-1.0f) #define PAL_POSINF -logf(0.0f) #define PAL_NEGINF logf(0.0f) /** * Helper test structure */ struct test { float value; /* value to test the function with */ float expected; /* expected result */ float variance; /* maximum delta between the expected and actual result */ }; /** * tanhf_test1_validate * * test validation function */ void __cdecl tanhf_test1_validate(float value, float expected, float variance) { float result = tanhf(value); /* * The test is valid when the difference between result * and expected is less than or equal to variance */ float delta = fabsf(result - expected); if (delta > variance) { Fail("tanhf(%g) returned %10.9g when it should have returned %10.9g", value, result, expected); } } /** * tanhf_test1_validate * * test validation function for values returning NaN */ void __cdecl tanhf_test1_validate_isnan(float value) { float result = tanhf(value); if (!_isnanf(result)) { Fail("tanhf(%g) returned %10.9g when it should have returned %10.9g", value, result, PAL_NAN); } } /** * main * * executable entry point */ PALTEST(c_runtime_tanhf_test1_paltest_tanhf_test1, "c_runtime/tanhf/test1/paltest_tanhf_test1") { struct test tests[] = { /* value expected variance */ { 0, 0, PAL_EPSILON }, { 0.318309886f, 0.307977913f, PAL_EPSILON }, // value: 1 / pi { 0.434294482f, 0.408904012f, PAL_EPSILON }, // value: log10f(e) { 0.636619772f, 0.562593600f, PAL_EPSILON }, // value: 2 / pi { 0.693147181f, 0.6f, PAL_EPSILON }, // value: ln(2) { 0.707106781f, 0.608859365f, PAL_EPSILON }, // value: 1 / sqrtf(2) { 0.785398163f, 0.655794203f, PAL_EPSILON }, // value: pi / 4 { 1, 0.761594156f, PAL_EPSILON }, { 1.12837917f, 0.810463806f, PAL_EPSILON }, // value: 2 / sqrtf(pi) { 1.41421356f, 0.888385562f, PAL_EPSILON }, // value: sqrtf(2) { 1.44269504f, 0.894238946f, PAL_EPSILON }, // value: logf2(e) { 1.57079633f, 0.917152336f, PAL_EPSILON }, // value: pi / 2 { 2.30258509f, 0.980198020f, PAL_EPSILON }, // value: ln(10) { 2.71828183f, 0.991328916f, PAL_EPSILON }, // value: e { 3.14159265f, 0.996272076f, PAL_EPSILON }, // value: pi { PAL_POSINF, 1, PAL_EPSILON * 10 } }; /* PAL initialization */ if (PAL_Initialize(argc, argv) != 0) { return FAIL; } for (int i = 0; i < 
(sizeof(tests) / sizeof(struct test)); i++) { tanhf_test1_validate( tests[i].value, tests[i].expected, tests[i].variance); tanhf_test1_validate(-tests[i].value, -tests[i].expected, tests[i].variance); } tanhf_test1_validate_isnan(PAL_NAN); PAL_Terminate(); return PASS; }
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/debug/daccess/amd64/primitives.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include "stdafx.h" #include "../../shared/amd64/primitives.cpp"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include "stdafx.h" #include "../../shared/amd64/primitives.cpp"
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/nativeaot/libunwind/src/Unwind-EHABI.cpp
//===--------------------------- Unwind-EHABI.cpp -------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // // // Implements ARM zero-cost C++ exceptions // //===----------------------------------------------------------------------===// #include "Unwind-EHABI.h" #if defined(_LIBUNWIND_ARM_EHABI) #include <inttypes.h> #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "config.h" #include "libunwind.h" #include "libunwind_ext.h" #include "unwind.h" namespace { // Strange order: take words in order, but inside word, take from most to least // signinficant byte. uint8_t getByte(const uint32_t* data, size_t offset) { const uint8_t* byteData = reinterpret_cast<const uint8_t*>(data); return byteData[(offset & ~(size_t)0x03) + (3 - (offset & (size_t)0x03))]; } const char* getNextWord(const char* data, uint32_t* out) { *out = *reinterpret_cast<const uint32_t*>(data); return data + 4; } const char* getNextNibble(const char* data, uint32_t* out) { *out = *reinterpret_cast<const uint16_t*>(data); return data + 2; } struct Descriptor { // See # 9.2 typedef enum { SU16 = 0, // Short descriptor, 16-bit entries LU16 = 1, // Long descriptor, 16-bit entries LU32 = 3, // Long descriptor, 32-bit entries RESERVED0 = 4, RESERVED1 = 5, RESERVED2 = 6, RESERVED3 = 7, RESERVED4 = 8, RESERVED5 = 9, RESERVED6 = 10, RESERVED7 = 11, RESERVED8 = 12, RESERVED9 = 13, RESERVED10 = 14, RESERVED11 = 15 } Format; // See # 9.2 typedef enum { CLEANUP = 0x0, FUNC = 0x1, CATCH = 0x2, INVALID = 0x4 } Kind; }; _Unwind_Reason_Code ProcessDescriptors( _Unwind_State state, _Unwind_Control_Block* ucbp, struct _Unwind_Context* context, Descriptor::Format format, const char* descriptorStart, uint32_t flags) { // EHT is inlined in the index using compact form. No descriptors. #5 if (flags & 0x1) return _URC_CONTINUE_UNWIND; // TODO: We should check the state here, and determine whether we need to // perform phase1 or phase2 unwinding. (void)state; const char* descriptor = descriptorStart; uint32_t descriptorWord; getNextWord(descriptor, &descriptorWord); while (descriptorWord) { // Read descriptor based on # 9.2. uint32_t length; uint32_t offset; switch (format) { case Descriptor::LU32: descriptor = getNextWord(descriptor, &length); descriptor = getNextWord(descriptor, &offset); break; case Descriptor::LU16: descriptor = getNextNibble(descriptor, &length); descriptor = getNextNibble(descriptor, &offset); break; default: assert(false); return _URC_FAILURE; } // See # 9.2 table for decoding the kind of descriptor. It's a 2-bit value. Descriptor::Kind kind = static_cast<Descriptor::Kind>((length & 0x1) | ((offset & 0x1) << 1)); // Clear off flag from last bit. length &= ~1u; offset &= ~1u; uintptr_t scopeStart = ucbp->pr_cache.fnstart + offset; uintptr_t scopeEnd = scopeStart + length; uintptr_t pc = _Unwind_GetIP(context); bool isInScope = (scopeStart <= pc) && (pc < scopeEnd); switch (kind) { case Descriptor::CLEANUP: { // TODO(ajwong): Handle cleanup descriptors. break; } case Descriptor::FUNC: { // TODO(ajwong): Handle function descriptors. break; } case Descriptor::CATCH: { // Catch descriptors require gobbling one more word. uint32_t landing_pad; descriptor = getNextWord(descriptor, &landing_pad); if (isInScope) { // TODO(ajwong): This is only phase1 compatible logic. Implement // phase2. 
landing_pad = signExtendPrel31(landing_pad & ~0x80000000); if (landing_pad == 0xffffffff) { return _URC_HANDLER_FOUND; } else if (landing_pad == 0xfffffffe) { return _URC_FAILURE; } else { /* bool is_reference_type = landing_pad & 0x80000000; void* matched_object; if (__cxxabiv1::__cxa_type_match( ucbp, reinterpret_cast<const std::type_info *>(landing_pad), is_reference_type, &matched_object) != __cxxabiv1::ctm_failed) return _URC_HANDLER_FOUND; */ _LIBUNWIND_ABORT("Type matching not implemented"); } } break; } default: _LIBUNWIND_ABORT("Invalid descriptor kind found."); } getNextWord(descriptor, &descriptorWord); } return _URC_CONTINUE_UNWIND; } static _Unwind_Reason_Code unwindOneFrame(_Unwind_State state, _Unwind_Control_Block* ucbp, struct _Unwind_Context* context) { // Read the compact model EHT entry's header # 6.3 const uint32_t* unwindingData = ucbp->pr_cache.ehtp; assert((*unwindingData & 0xf0000000) == 0x80000000 && "Must be a compact entry"); Descriptor::Format format = static_cast<Descriptor::Format>((*unwindingData & 0x0f000000) >> 24); const char *lsda = reinterpret_cast<const char *>(_Unwind_GetLanguageSpecificData(context)); // Handle descriptors before unwinding so they are processed in the context // of the correct stack frame. _Unwind_Reason_Code result = ProcessDescriptors(state, ucbp, context, format, lsda, ucbp->pr_cache.additional); if (result != _URC_CONTINUE_UNWIND) return result; if (__unw_step(reinterpret_cast<unw_cursor_t *>(context)) != UNW_STEP_SUCCESS) return _URC_FAILURE; return _URC_CONTINUE_UNWIND; } // Generates mask discriminator for _Unwind_VRS_Pop, e.g. for _UVRSC_CORE / // _UVRSD_UINT32. uint32_t RegisterMask(uint8_t start, uint8_t count_minus_one) { return ((1U << (count_minus_one + 1)) - 1) << start; } // Generates mask discriminator for _Unwind_VRS_Pop, e.g. for _UVRSC_VFP / // _UVRSD_DOUBLE. uint32_t RegisterRange(uint8_t start, uint8_t count_minus_one) { return ((uint32_t)start << 16) | ((uint32_t)count_minus_one + 1); } } // end anonymous namespace /** * Decodes an EHT entry. * * @param data Pointer to EHT. * @param[out] off Offset from return value (in bytes) to begin interpretation. * @param[out] len Number of bytes in unwind code. * @return Pointer to beginning of unwind code. */ extern "C" const uint32_t* decode_eht_entry(const uint32_t* data, size_t* off, size_t* len) { if ((*data & 0x80000000) == 0) { // 6.2: Generic Model // // EHT entry is a prel31 pointing to the PR, followed by data understood // only by the personality routine. Fortunately, all existing assembler // implementations, including GNU assembler, LLVM integrated assembler, // and ARM assembler, assume that the unwind opcodes come after the // personality routine address. *off = 1; // First byte is size data. *len = (((data[1] >> 24) & 0xff) + 1) * 4; data++; // Skip the first word, which is the prel31 offset. 
} else { // 6.3: ARM Compact Model // // EHT entries here correspond to the __aeabi_unwind_cpp_pr[012] PRs indeded // by format: Descriptor::Format format = static_cast<Descriptor::Format>((*data & 0x0f000000) >> 24); switch (format) { case Descriptor::SU16: *len = 4; *off = 1; break; case Descriptor::LU16: case Descriptor::LU32: *len = 4 + 4 * ((*data & 0x00ff0000) >> 16); *off = 2; break; default: return nullptr; } } return data; } _LIBUNWIND_EXPORT _Unwind_Reason_Code _Unwind_VRS_Interpret(_Unwind_Context *context, const uint32_t *data, size_t offset, size_t len) { bool wrotePC = false; bool finish = false; while (offset < len && !finish) { uint8_t byte = getByte(data, offset++); if ((byte & 0x80) == 0) { uint32_t sp; _Unwind_VRS_Get(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp); if (byte & 0x40) sp -= (((uint32_t)byte & 0x3f) << 2) + 4; else sp += ((uint32_t)byte << 2) + 4; _Unwind_VRS_Set(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp, NULL); } else { switch (byte & 0xf0) { case 0x80: { if (offset >= len) return _URC_FAILURE; uint32_t registers = (((uint32_t)byte & 0x0f) << 12) | (((uint32_t)getByte(data, offset++)) << 4); if (!registers) return _URC_FAILURE; if (registers & (1 << 15)) wrotePC = true; _Unwind_VRS_Pop(context, _UVRSC_CORE, registers, _UVRSD_UINT32); break; } case 0x90: { uint8_t reg = byte & 0x0f; if (reg == 13 || reg == 15) return _URC_FAILURE; uint32_t sp; _Unwind_VRS_Get(context, _UVRSC_CORE, UNW_ARM_R0 + reg, _UVRSD_UINT32, &sp); _Unwind_VRS_Set(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp, NULL); break; } case 0xa0: { uint32_t registers = RegisterMask(4, byte & 0x07); if (byte & 0x08) registers |= 1 << 14; _Unwind_VRS_Pop(context, _UVRSC_CORE, registers, _UVRSD_UINT32); break; } case 0xb0: { switch (byte) { case 0xb0: finish = true; break; case 0xb1: { if (offset >= len) return _URC_FAILURE; uint8_t registers = getByte(data, offset++); if (registers & 0xf0 || !registers) return _URC_FAILURE; _Unwind_VRS_Pop(context, _UVRSC_CORE, registers, _UVRSD_UINT32); break; } case 0xb2: { uint32_t addend = 0; uint32_t shift = 0; // This decodes a uleb128 value. 
while (true) { if (offset >= len) return _URC_FAILURE; uint32_t v = getByte(data, offset++); addend |= (v & 0x7f) << shift; if ((v & 0x80) == 0) break; shift += 7; } uint32_t sp; _Unwind_VRS_Get(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp); sp += 0x204 + (addend << 2); _Unwind_VRS_Set(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp, NULL); break; } case 0xb3: { uint8_t v = getByte(data, offset++); _Unwind_VRS_Pop(context, _UVRSC_VFP, RegisterRange(static_cast<uint8_t>(v >> 4), v & 0x0f), _UVRSD_VFPX); break; } case 0xb4: case 0xb5: case 0xb6: case 0xb7: return _URC_FAILURE; default: _Unwind_VRS_Pop(context, _UVRSC_VFP, RegisterRange(8, byte & 0x07), _UVRSD_VFPX); break; } break; } case 0xc0: { switch (byte) { #if defined(__ARM_WMMX) case 0xc0: case 0xc1: case 0xc2: case 0xc3: case 0xc4: case 0xc5: _Unwind_VRS_Pop(context, _UVRSC_WMMXD, RegisterRange(10, byte & 0x7), _UVRSD_DOUBLE); break; case 0xc6: { uint8_t v = getByte(data, offset++); uint8_t start = static_cast<uint8_t>(v >> 4); uint8_t count_minus_one = v & 0xf; if (start + count_minus_one >= 16) return _URC_FAILURE; _Unwind_VRS_Pop(context, _UVRSC_WMMXD, RegisterRange(start, count_minus_one), _UVRSD_DOUBLE); break; } case 0xc7: { uint8_t v = getByte(data, offset++); if (!v || v & 0xf0) return _URC_FAILURE; _Unwind_VRS_Pop(context, _UVRSC_WMMXC, v, _UVRSD_DOUBLE); break; } #endif case 0xc8: case 0xc9: { uint8_t v = getByte(data, offset++); uint8_t start = static_cast<uint8_t>(((byte == 0xc8) ? 16 : 0) + (v >> 4)); uint8_t count_minus_one = v & 0xf; if (start + count_minus_one >= 32) return _URC_FAILURE; _Unwind_VRS_Pop(context, _UVRSC_VFP, RegisterRange(start, count_minus_one), _UVRSD_DOUBLE); break; } default: return _URC_FAILURE; } break; } case 0xd0: { if (byte & 0x08) return _URC_FAILURE; _Unwind_VRS_Pop(context, _UVRSC_VFP, RegisterRange(8, byte & 0x7), _UVRSD_DOUBLE); break; } default: return _URC_FAILURE; } } } if (!wrotePC) { uint32_t lr; _Unwind_VRS_Get(context, _UVRSC_CORE, UNW_ARM_LR, _UVRSD_UINT32, &lr); _Unwind_VRS_Set(context, _UVRSC_CORE, UNW_ARM_IP, _UVRSD_UINT32, &lr, NULL); } return _URC_CONTINUE_UNWIND; } extern "C" _LIBUNWIND_EXPORT _Unwind_Reason_Code __aeabi_unwind_cpp_pr0(_Unwind_State state, _Unwind_Control_Block *ucbp, _Unwind_Context *context) { return unwindOneFrame(state, ucbp, context); } extern "C" _LIBUNWIND_EXPORT _Unwind_Reason_Code __aeabi_unwind_cpp_pr1(_Unwind_State state, _Unwind_Control_Block *ucbp, _Unwind_Context *context) { return unwindOneFrame(state, ucbp, context); } extern "C" _LIBUNWIND_EXPORT _Unwind_Reason_Code __aeabi_unwind_cpp_pr2(_Unwind_State state, _Unwind_Control_Block *ucbp, _Unwind_Context *context) { return unwindOneFrame(state, ucbp, context); } static _Unwind_Reason_Code unwind_phase1(unw_context_t *uc, unw_cursor_t *cursor, _Unwind_Exception *exception_object) { // EHABI #7.3 discusses preserving the VRS in a "temporary VRS" during // phase 1 and then restoring it to the "primary VRS" for phase 2. The // effect is phase 2 doesn't see any of the VRS manipulations from phase 1. // In this implementation, the phases don't share the VRS backing store. // Instead, they are passed the original |uc| and they create a new VRS // from scratch thus achieving the same effect. __unw_init_local(cursor, uc); // Walk each frame looking for a place to stop. for (bool handlerNotFound = true; handlerNotFound;) { // See if frame has code to run (has personality routine). 
unw_proc_info_t frameInfo; if (__unw_get_proc_info(cursor, &frameInfo) != UNW_ESUCCESS) { _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): __unw_get_proc_info " "failed => _URC_FATAL_PHASE1_ERROR", static_cast<void *>(exception_object)); return _URC_FATAL_PHASE1_ERROR; } // When tracing, print state information. if (_LIBUNWIND_TRACING_UNWINDING) { char functionBuf[512]; const char *functionName = functionBuf; unw_word_t offset; if ((__unw_get_proc_name(cursor, functionBuf, sizeof(functionBuf), &offset) != UNW_ESUCCESS) || (frameInfo.start_ip + offset > frameInfo.end_ip)) functionName = ".anonymous."; unw_word_t pc; __unw_get_reg(cursor, UNW_REG_IP, &pc); _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): pc=0x%" PRIxPTR ", start_ip=0x%" PRIxPTR ", func=%s, " "lsda=0x%" PRIxPTR ", personality=0x%" PRIxPTR, static_cast<void *>(exception_object), pc, frameInfo.start_ip, functionName, frameInfo.lsda, frameInfo.handler); } // If there is a personality routine, ask it if it will want to stop at // this frame. if (frameInfo.handler != 0) { __personality_routine p = (__personality_routine)(long)(frameInfo.handler); _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): calling personality function %p", static_cast<void *>(exception_object), reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(p))); struct _Unwind_Context *context = (struct _Unwind_Context *)(cursor); exception_object->pr_cache.fnstart = frameInfo.start_ip; exception_object->pr_cache.ehtp = (_Unwind_EHT_Header *)frameInfo.unwind_info; exception_object->pr_cache.additional = frameInfo.flags; _Unwind_Reason_Code personalityResult = (*p)(_US_VIRTUAL_UNWIND_FRAME, exception_object, context); _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): personality result %d start_ip %x ehtp %p " "additional %x", static_cast<void *>(exception_object), personalityResult, exception_object->pr_cache.fnstart, static_cast<void *>(exception_object->pr_cache.ehtp), exception_object->pr_cache.additional); switch (personalityResult) { case _URC_HANDLER_FOUND: // found a catch clause or locals that need destructing in this frame // stop search and remember stack pointer at the frame handlerNotFound = false; // p should have initialized barrier_cache. EHABI #7.3.5 _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): _URC_HANDLER_FOUND", static_cast<void *>(exception_object)); return _URC_NO_REASON; case _URC_CONTINUE_UNWIND: _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): _URC_CONTINUE_UNWIND", static_cast<void *>(exception_object)); // continue unwinding break; // EHABI #7.3.3 case _URC_FAILURE: return _URC_FAILURE; default: // something went wrong _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): _URC_FATAL_PHASE1_ERROR", static_cast<void *>(exception_object)); return _URC_FATAL_PHASE1_ERROR; } } } return _URC_NO_REASON; } static _Unwind_Reason_Code unwind_phase2(unw_context_t *uc, unw_cursor_t *cursor, _Unwind_Exception *exception_object, bool resume) { // See comment at the start of unwind_phase1 regarding VRS integrity. __unw_init_local(cursor, uc); _LIBUNWIND_TRACE_UNWINDING("unwind_phase2(ex_ojb=%p)", static_cast<void *>(exception_object)); int frame_count = 0; // Walk each frame until we reach where search phase said to stop. while (true) { // Ask libunwind to get next frame (skip over first which is // _Unwind_RaiseException or _Unwind_Resume). // // Resume only ever makes sense for 1 frame. _Unwind_State state = resume ? 
_US_UNWIND_FRAME_RESUME : _US_UNWIND_FRAME_STARTING; if (resume && frame_count == 1) { // On a resume, first unwind the _Unwind_Resume() frame. The next frame // is now the landing pad for the cleanup from a previous execution of // phase2. To continue unwindingly correctly, replace VRS[15] with the // IP of the frame that the previous run of phase2 installed the context // for. After this, continue unwinding as if normal. // // See #7.4.6 for details. __unw_set_reg(cursor, UNW_REG_IP, exception_object->unwinder_cache.reserved2, NULL); resume = false; } // Get info about this frame. unw_word_t sp; unw_proc_info_t frameInfo; __unw_get_reg(cursor, UNW_REG_SP, &sp); if (__unw_get_proc_info(cursor, &frameInfo) != UNW_ESUCCESS) { _LIBUNWIND_TRACE_UNWINDING( "unwind_phase2(ex_ojb=%p): __unw_get_proc_info " "failed => _URC_FATAL_PHASE2_ERROR", static_cast<void *>(exception_object)); return _URC_FATAL_PHASE2_ERROR; } // When tracing, print state information. if (_LIBUNWIND_TRACING_UNWINDING) { char functionBuf[512]; const char *functionName = functionBuf; unw_word_t offset; if ((__unw_get_proc_name(cursor, functionBuf, sizeof(functionBuf), &offset) != UNW_ESUCCESS) || (frameInfo.start_ip + offset > frameInfo.end_ip)) functionName = ".anonymous."; _LIBUNWIND_TRACE_UNWINDING( "unwind_phase2(ex_ojb=%p): start_ip=0x%" PRIxPTR ", func=%s, sp=0x%" PRIxPTR ", " "lsda=0x%" PRIxPTR ", personality=0x%" PRIxPTR "", static_cast<void *>(exception_object), frameInfo.start_ip, functionName, sp, frameInfo.lsda, frameInfo.handler); } // If there is a personality routine, tell it we are unwinding. if (frameInfo.handler != 0) { __personality_routine p = (__personality_routine)(long)(frameInfo.handler); struct _Unwind_Context *context = (struct _Unwind_Context *)(cursor); // EHABI #7.2 exception_object->pr_cache.fnstart = frameInfo.start_ip; exception_object->pr_cache.ehtp = (_Unwind_EHT_Header *)frameInfo.unwind_info; exception_object->pr_cache.additional = frameInfo.flags; _Unwind_Reason_Code personalityResult = (*p)(state, exception_object, context); switch (personalityResult) { case _URC_CONTINUE_UNWIND: // Continue unwinding _LIBUNWIND_TRACE_UNWINDING( "unwind_phase2(ex_ojb=%p): _URC_CONTINUE_UNWIND", static_cast<void *>(exception_object)); // EHABI #7.2 if (sp == exception_object->barrier_cache.sp) { // Phase 1 said we would stop at this frame, but we did not... _LIBUNWIND_ABORT("during phase1 personality function said it would " "stop here, but now in phase2 it did not stop here"); } break; case _URC_INSTALL_CONTEXT: _LIBUNWIND_TRACE_UNWINDING( "unwind_phase2(ex_ojb=%p): _URC_INSTALL_CONTEXT", static_cast<void *>(exception_object)); // Personality routine says to transfer control to landing pad. // We may get control back if landing pad calls _Unwind_Resume(). if (_LIBUNWIND_TRACING_UNWINDING) { unw_word_t pc; __unw_get_reg(cursor, UNW_REG_IP, &pc); __unw_get_reg(cursor, UNW_REG_SP, &sp); _LIBUNWIND_TRACE_UNWINDING("unwind_phase2(ex_ojb=%p): re-entering " "user code with ip=0x%" PRIxPTR ", sp=0x%" PRIxPTR, static_cast<void *>(exception_object), pc, sp); } { // EHABI #7.4.1 says we need to preserve pc for when _Unwind_Resume // is called back, to find this same frame. unw_word_t pc; __unw_get_reg(cursor, UNW_REG_IP, &pc); exception_object->unwinder_cache.reserved2 = (uint32_t)pc; } __unw_resume(cursor); // __unw_resume() only returns if there was an error. return _URC_FATAL_PHASE2_ERROR; // # EHABI #7.4.3 case _URC_FAILURE: abort(); default: // Personality routine returned an unknown result code. 
_LIBUNWIND_DEBUG_LOG("personality function returned unknown result %d", personalityResult); return _URC_FATAL_PHASE2_ERROR; } } frame_count++; } // Clean up phase did not resume at the frame that the search phase // said it would... return _URC_FATAL_PHASE2_ERROR; } /// Called by __cxa_throw. Only returns if there is a fatal error. _LIBUNWIND_EXPORT _Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Exception *exception_object) { _LIBUNWIND_TRACE_API("_Unwind_RaiseException(ex_obj=%p)", static_cast<void *>(exception_object)); unw_context_t uc; unw_cursor_t cursor; __unw_getcontext(&uc); // This field for is for compatibility with GCC to say this isn't a forced // unwind. EHABI #7.2 exception_object->unwinder_cache.reserved1 = 0; // phase 1: the search phase _Unwind_Reason_Code phase1 = unwind_phase1(&uc, &cursor, exception_object); if (phase1 != _URC_NO_REASON) return phase1; // phase 2: the clean up phase return unwind_phase2(&uc, &cursor, exception_object, false); } _LIBUNWIND_EXPORT void _Unwind_Complete(_Unwind_Exception* exception_object) { // This is to be called when exception handling completes to give us a chance // to perform any housekeeping. EHABI #7.2. But we have nothing to do here. (void)exception_object; } /// When _Unwind_RaiseException() is in phase2, it hands control /// to the personality function at each frame. The personality /// may force a jump to a landing pad in that function, the landing /// pad code may then call _Unwind_Resume() to continue with the /// unwinding. Note: the call to _Unwind_Resume() is from compiler /// generated user code. All other _Unwind_* routines are called /// by the C++ runtime __cxa_* routines. /// /// Note: re-throwing an exception (as opposed to continuing the unwind) /// is implemented by having the code call __cxa_rethrow() which /// in turn calls _Unwind_Resume_or_Rethrow(). _LIBUNWIND_EXPORT void _Unwind_Resume(_Unwind_Exception *exception_object) { _LIBUNWIND_TRACE_API("_Unwind_Resume(ex_obj=%p)", static_cast<void *>(exception_object)); unw_context_t uc; unw_cursor_t cursor; __unw_getcontext(&uc); // _Unwind_RaiseException on EHABI will always set the reserved1 field to 0, // which is in the same position as private_1 below. // TODO(ajwong): Who wronte the above? Why is it true? unwind_phase2(&uc, &cursor, exception_object, true); // Clients assume _Unwind_Resume() does not return, so all we can do is abort. _LIBUNWIND_ABORT("_Unwind_Resume() can't return"); } /// Called by personality handler during phase 2 to get LSDA for current frame. 
_LIBUNWIND_EXPORT uintptr_t _Unwind_GetLanguageSpecificData(struct _Unwind_Context *context) { unw_cursor_t *cursor = (unw_cursor_t *)context; unw_proc_info_t frameInfo; uintptr_t result = 0; if (__unw_get_proc_info(cursor, &frameInfo) == UNW_ESUCCESS) result = (uintptr_t)frameInfo.lsda; _LIBUNWIND_TRACE_API( "_Unwind_GetLanguageSpecificData(context=%p) => 0x%llx", static_cast<void *>(context), (long long)result); return result; } static uint64_t ValueAsBitPattern(_Unwind_VRS_DataRepresentation representation, void* valuep) { uint64_t value = 0; switch (representation) { case _UVRSD_UINT32: case _UVRSD_FLOAT: memcpy(&value, valuep, sizeof(uint32_t)); break; case _UVRSD_VFPX: case _UVRSD_UINT64: case _UVRSD_DOUBLE: memcpy(&value, valuep, sizeof(uint64_t)); break; } return value; } _LIBUNWIND_EXPORT _Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *context, _Unwind_VRS_RegClass regclass, uint32_t regno, _Unwind_VRS_DataRepresentation representation, void *valuep, unw_word_t *pos) { _LIBUNWIND_TRACE_API("_Unwind_VRS_Set(context=%p, regclass=%d, reg=%d, " "rep=%d, value=0x%llX)", static_cast<void *>(context), regclass, regno, representation, ValueAsBitPattern(representation, valuep)); unw_cursor_t *cursor = (unw_cursor_t *)context; switch (regclass) { case _UVRSC_CORE: if (representation != _UVRSD_UINT32 || regno > 15) return _UVRSR_FAILED; return __unw_set_reg(cursor, (unw_regnum_t)(UNW_ARM_R0 + regno), *(unw_word_t *)valuep,(unw_word_t *)pos) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; case _UVRSC_VFP: if (representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE) return _UVRSR_FAILED; if (representation == _UVRSD_VFPX) { // Can only touch d0-15 with FSTMFDX. if (regno > 15) return _UVRSR_FAILED; __unw_save_vfp_as_X(cursor); } else { if (regno > 31) return _UVRSR_FAILED; } return __unw_set_fpreg(cursor, (unw_regnum_t)(UNW_ARM_D0 + regno), *(unw_fpreg_t *)valuep) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; #if defined(__ARM_WMMX) case _UVRSC_WMMXC: if (representation != _UVRSD_UINT32 || regno > 3) return _UVRSR_FAILED; return __unw_set_reg(cursor, (unw_regnum_t)(UNW_ARM_WC0 + regno), *(unw_word_t *)valuep) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; case _UVRSC_WMMXD: if (representation != _UVRSD_DOUBLE || regno > 31) return _UVRSR_FAILED; return __unw_set_fpreg(cursor, (unw_regnum_t)(UNW_ARM_WR0 + regno), *(unw_fpreg_t *)valuep) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; #else case _UVRSC_WMMXC: case _UVRSC_WMMXD: break; #endif } _LIBUNWIND_ABORT("unsupported register class"); } static _Unwind_VRS_Result _Unwind_VRS_Get_Internal(_Unwind_Context *context, _Unwind_VRS_RegClass regclass, uint32_t regno, _Unwind_VRS_DataRepresentation representation, void *valuep) { unw_cursor_t *cursor = (unw_cursor_t *)context; switch (regclass) { case _UVRSC_CORE: if (representation != _UVRSD_UINT32 || regno > 15) return _UVRSR_FAILED; return __unw_get_reg(cursor, (unw_regnum_t)(UNW_ARM_R0 + regno), (unw_word_t *)valuep) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; case _UVRSC_VFP: if (representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE) return _UVRSR_FAILED; if (representation == _UVRSD_VFPX) { // Can only touch d0-15 with FSTMFDX. if (regno > 15) return _UVRSR_FAILED; __unw_save_vfp_as_X(cursor); } else { if (regno > 31) return _UVRSR_FAILED; } return __unw_get_fpreg(cursor, (unw_regnum_t)(UNW_ARM_D0 + regno), (unw_fpreg_t *)valuep) == UNW_ESUCCESS ? 
_UVRSR_OK : _UVRSR_FAILED; #if defined(__ARM_WMMX) case _UVRSC_WMMXC: if (representation != _UVRSD_UINT32 || regno > 3) return _UVRSR_FAILED; return __unw_get_reg(cursor, (unw_regnum_t)(UNW_ARM_WC0 + regno), (unw_word_t *)valuep) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; case _UVRSC_WMMXD: if (representation != _UVRSD_DOUBLE || regno > 31) return _UVRSR_FAILED; return __unw_get_fpreg(cursor, (unw_regnum_t)(UNW_ARM_WR0 + regno), (unw_fpreg_t *)valuep) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; #else case _UVRSC_WMMXC: case _UVRSC_WMMXD: break; #endif } _LIBUNWIND_ABORT("unsupported register class"); } _LIBUNWIND_EXPORT _Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *context, _Unwind_VRS_RegClass regclass, uint32_t regno, _Unwind_VRS_DataRepresentation representation, void *valuep) { _Unwind_VRS_Result result = _Unwind_VRS_Get_Internal(context, regclass, regno, representation, valuep); _LIBUNWIND_TRACE_API("_Unwind_VRS_Get(context=%p, regclass=%d, reg=%d, " "rep=%d, value=0x%llX, result = %d)", static_cast<void *>(context), regclass, regno, representation, ValueAsBitPattern(representation, valuep), result); return result; } _Unwind_VRS_Result _Unwind_VRS_Pop(_Unwind_Context *context, _Unwind_VRS_RegClass regclass, uint32_t discriminator, _Unwind_VRS_DataRepresentation representation) { _LIBUNWIND_TRACE_API("_Unwind_VRS_Pop(context=%p, regclass=%d, " "discriminator=%d, representation=%d)", static_cast<void *>(context), regclass, discriminator, representation); switch (regclass) { case _UVRSC_WMMXC: #if !defined(__ARM_WMMX) break; #endif case _UVRSC_CORE: { if (representation != _UVRSD_UINT32) return _UVRSR_FAILED; // When popping SP from the stack, we don't want to override it from the // computed new stack location. See EHABI #7.5.4 table 3. bool poppedSP = false; uint32_t* sp; uint32_t* pos; if (_Unwind_VRS_Get(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp) != _UVRSR_OK) { return _UVRSR_FAILED; } for (uint32_t i = 0; i < 16; ++i) { if (!(discriminator & static_cast<uint32_t>(1 << i))) continue; pos = sp; uint32_t value = *sp++; if (regclass == _UVRSC_CORE && i == 13) poppedSP = true; if (_Unwind_VRS_Set(context, regclass, i, _UVRSD_UINT32, &value, pos) != _UVRSR_OK) { return _UVRSR_FAILED; } } if (!poppedSP) { return _Unwind_VRS_Set(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp, NULL); } return _UVRSR_OK; } case _UVRSC_WMMXD: #if !defined(__ARM_WMMX) break; #endif case _UVRSC_VFP: { if (representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE) return _UVRSR_FAILED; uint32_t first = discriminator >> 16; uint32_t count = discriminator & 0xffff; uint32_t end = first+count; uint32_t* sp; if (_Unwind_VRS_Get(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp) != _UVRSR_OK) { return _UVRSR_FAILED; } // For _UVRSD_VFPX, we're assuming the data is stored in FSTMX "standard // format 1", which is equivalent to FSTMD + a padding word. for (uint32_t i = first; i < end; ++i) { // SP is only 32-bit aligned so don't copy 64-bit at a time. uint64_t value = *sp++; value |= ((uint64_t)(*sp++)) << 32; if (_Unwind_VRS_Set(context, regclass, i, representation, &value, NULL) != _UVRSR_OK) return _UVRSR_FAILED; } if (representation == _UVRSD_VFPX) ++sp; return _Unwind_VRS_Set(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp, NULL); } } _LIBUNWIND_ABORT("unsupported register class"); } /// Called by personality handler during phase 2 to find the start of the /// function. 
_LIBUNWIND_EXPORT uintptr_t _Unwind_GetRegionStart(struct _Unwind_Context *context) { unw_cursor_t *cursor = (unw_cursor_t *)context; unw_proc_info_t frameInfo; uintptr_t result = 0; if (__unw_get_proc_info(cursor, &frameInfo) == UNW_ESUCCESS) result = (uintptr_t)frameInfo.start_ip; _LIBUNWIND_TRACE_API("_Unwind_GetRegionStart(context=%p) => 0x%llX", static_cast<void *>(context), (long long)result); return result; } /// Called by personality handler during phase 2 if a foreign exception // is caught. _LIBUNWIND_EXPORT void _Unwind_DeleteException(_Unwind_Exception *exception_object) { _LIBUNWIND_TRACE_API("_Unwind_DeleteException(ex_obj=%p)", static_cast<void *>(exception_object)); if (exception_object->exception_cleanup != NULL) (*exception_object->exception_cleanup)(_URC_FOREIGN_EXCEPTION_CAUGHT, exception_object); } extern "C" _LIBUNWIND_EXPORT _Unwind_Reason_Code __gnu_unwind_frame(_Unwind_Exception *exception_object, struct _Unwind_Context *context) { unw_cursor_t *cursor = (unw_cursor_t *)context; if (__unw_step(cursor) != UNW_STEP_SUCCESS) return _URC_FAILURE; return _URC_OK; } #endif // defined(_LIBUNWIND_ARM_EHABI)
//===--------------------------- Unwind-EHABI.cpp -------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // // // Implements ARM zero-cost C++ exceptions // //===----------------------------------------------------------------------===// #include "Unwind-EHABI.h" #if defined(_LIBUNWIND_ARM_EHABI) #include <inttypes.h> #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "config.h" #include "libunwind.h" #include "libunwind_ext.h" #include "unwind.h" namespace { // Strange order: take words in order, but inside word, take from most to least // signinficant byte. uint8_t getByte(const uint32_t* data, size_t offset) { const uint8_t* byteData = reinterpret_cast<const uint8_t*>(data); return byteData[(offset & ~(size_t)0x03) + (3 - (offset & (size_t)0x03))]; } const char* getNextWord(const char* data, uint32_t* out) { *out = *reinterpret_cast<const uint32_t*>(data); return data + 4; } const char* getNextNibble(const char* data, uint32_t* out) { *out = *reinterpret_cast<const uint16_t*>(data); return data + 2; } struct Descriptor { // See # 9.2 typedef enum { SU16 = 0, // Short descriptor, 16-bit entries LU16 = 1, // Long descriptor, 16-bit entries LU32 = 3, // Long descriptor, 32-bit entries RESERVED0 = 4, RESERVED1 = 5, RESERVED2 = 6, RESERVED3 = 7, RESERVED4 = 8, RESERVED5 = 9, RESERVED6 = 10, RESERVED7 = 11, RESERVED8 = 12, RESERVED9 = 13, RESERVED10 = 14, RESERVED11 = 15 } Format; // See # 9.2 typedef enum { CLEANUP = 0x0, FUNC = 0x1, CATCH = 0x2, INVALID = 0x4 } Kind; }; _Unwind_Reason_Code ProcessDescriptors( _Unwind_State state, _Unwind_Control_Block* ucbp, struct _Unwind_Context* context, Descriptor::Format format, const char* descriptorStart, uint32_t flags) { // EHT is inlined in the index using compact form. No descriptors. #5 if (flags & 0x1) return _URC_CONTINUE_UNWIND; // TODO: We should check the state here, and determine whether we need to // perform phase1 or phase2 unwinding. (void)state; const char* descriptor = descriptorStart; uint32_t descriptorWord; getNextWord(descriptor, &descriptorWord); while (descriptorWord) { // Read descriptor based on # 9.2. uint32_t length; uint32_t offset; switch (format) { case Descriptor::LU32: descriptor = getNextWord(descriptor, &length); descriptor = getNextWord(descriptor, &offset); break; case Descriptor::LU16: descriptor = getNextNibble(descriptor, &length); descriptor = getNextNibble(descriptor, &offset); break; default: assert(false); return _URC_FAILURE; } // See # 9.2 table for decoding the kind of descriptor. It's a 2-bit value. Descriptor::Kind kind = static_cast<Descriptor::Kind>((length & 0x1) | ((offset & 0x1) << 1)); // Clear off flag from last bit. length &= ~1u; offset &= ~1u; uintptr_t scopeStart = ucbp->pr_cache.fnstart + offset; uintptr_t scopeEnd = scopeStart + length; uintptr_t pc = _Unwind_GetIP(context); bool isInScope = (scopeStart <= pc) && (pc < scopeEnd); switch (kind) { case Descriptor::CLEANUP: { // TODO(ajwong): Handle cleanup descriptors. break; } case Descriptor::FUNC: { // TODO(ajwong): Handle function descriptors. break; } case Descriptor::CATCH: { // Catch descriptors require gobbling one more word. uint32_t landing_pad; descriptor = getNextWord(descriptor, &landing_pad); if (isInScope) { // TODO(ajwong): This is only phase1 compatible logic. Implement // phase2. 
landing_pad = signExtendPrel31(landing_pad & ~0x80000000); if (landing_pad == 0xffffffff) { return _URC_HANDLER_FOUND; } else if (landing_pad == 0xfffffffe) { return _URC_FAILURE; } else { /* bool is_reference_type = landing_pad & 0x80000000; void* matched_object; if (__cxxabiv1::__cxa_type_match( ucbp, reinterpret_cast<const std::type_info *>(landing_pad), is_reference_type, &matched_object) != __cxxabiv1::ctm_failed) return _URC_HANDLER_FOUND; */ _LIBUNWIND_ABORT("Type matching not implemented"); } } break; } default: _LIBUNWIND_ABORT("Invalid descriptor kind found."); } getNextWord(descriptor, &descriptorWord); } return _URC_CONTINUE_UNWIND; } static _Unwind_Reason_Code unwindOneFrame(_Unwind_State state, _Unwind_Control_Block* ucbp, struct _Unwind_Context* context) { // Read the compact model EHT entry's header # 6.3 const uint32_t* unwindingData = ucbp->pr_cache.ehtp; assert((*unwindingData & 0xf0000000) == 0x80000000 && "Must be a compact entry"); Descriptor::Format format = static_cast<Descriptor::Format>((*unwindingData & 0x0f000000) >> 24); const char *lsda = reinterpret_cast<const char *>(_Unwind_GetLanguageSpecificData(context)); // Handle descriptors before unwinding so they are processed in the context // of the correct stack frame. _Unwind_Reason_Code result = ProcessDescriptors(state, ucbp, context, format, lsda, ucbp->pr_cache.additional); if (result != _URC_CONTINUE_UNWIND) return result; if (__unw_step(reinterpret_cast<unw_cursor_t *>(context)) != UNW_STEP_SUCCESS) return _URC_FAILURE; return _URC_CONTINUE_UNWIND; } // Generates mask discriminator for _Unwind_VRS_Pop, e.g. for _UVRSC_CORE / // _UVRSD_UINT32. uint32_t RegisterMask(uint8_t start, uint8_t count_minus_one) { return ((1U << (count_minus_one + 1)) - 1) << start; } // Generates mask discriminator for _Unwind_VRS_Pop, e.g. for _UVRSC_VFP / // _UVRSD_DOUBLE. uint32_t RegisterRange(uint8_t start, uint8_t count_minus_one) { return ((uint32_t)start << 16) | ((uint32_t)count_minus_one + 1); } } // end anonymous namespace /** * Decodes an EHT entry. * * @param data Pointer to EHT. * @param[out] off Offset from return value (in bytes) to begin interpretation. * @param[out] len Number of bytes in unwind code. * @return Pointer to beginning of unwind code. */ extern "C" const uint32_t* decode_eht_entry(const uint32_t* data, size_t* off, size_t* len) { if ((*data & 0x80000000) == 0) { // 6.2: Generic Model // // EHT entry is a prel31 pointing to the PR, followed by data understood // only by the personality routine. Fortunately, all existing assembler // implementations, including GNU assembler, LLVM integrated assembler, // and ARM assembler, assume that the unwind opcodes come after the // personality routine address. *off = 1; // First byte is size data. *len = (((data[1] >> 24) & 0xff) + 1) * 4; data++; // Skip the first word, which is the prel31 offset. 
} else { // 6.3: ARM Compact Model // // EHT entries here correspond to the __aeabi_unwind_cpp_pr[012] PRs indeded // by format: Descriptor::Format format = static_cast<Descriptor::Format>((*data & 0x0f000000) >> 24); switch (format) { case Descriptor::SU16: *len = 4; *off = 1; break; case Descriptor::LU16: case Descriptor::LU32: *len = 4 + 4 * ((*data & 0x00ff0000) >> 16); *off = 2; break; default: return nullptr; } } return data; } _LIBUNWIND_EXPORT _Unwind_Reason_Code _Unwind_VRS_Interpret(_Unwind_Context *context, const uint32_t *data, size_t offset, size_t len) { bool wrotePC = false; bool finish = false; while (offset < len && !finish) { uint8_t byte = getByte(data, offset++); if ((byte & 0x80) == 0) { uint32_t sp; _Unwind_VRS_Get(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp); if (byte & 0x40) sp -= (((uint32_t)byte & 0x3f) << 2) + 4; else sp += ((uint32_t)byte << 2) + 4; _Unwind_VRS_Set(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp, NULL); } else { switch (byte & 0xf0) { case 0x80: { if (offset >= len) return _URC_FAILURE; uint32_t registers = (((uint32_t)byte & 0x0f) << 12) | (((uint32_t)getByte(data, offset++)) << 4); if (!registers) return _URC_FAILURE; if (registers & (1 << 15)) wrotePC = true; _Unwind_VRS_Pop(context, _UVRSC_CORE, registers, _UVRSD_UINT32); break; } case 0x90: { uint8_t reg = byte & 0x0f; if (reg == 13 || reg == 15) return _URC_FAILURE; uint32_t sp; _Unwind_VRS_Get(context, _UVRSC_CORE, UNW_ARM_R0 + reg, _UVRSD_UINT32, &sp); _Unwind_VRS_Set(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp, NULL); break; } case 0xa0: { uint32_t registers = RegisterMask(4, byte & 0x07); if (byte & 0x08) registers |= 1 << 14; _Unwind_VRS_Pop(context, _UVRSC_CORE, registers, _UVRSD_UINT32); break; } case 0xb0: { switch (byte) { case 0xb0: finish = true; break; case 0xb1: { if (offset >= len) return _URC_FAILURE; uint8_t registers = getByte(data, offset++); if (registers & 0xf0 || !registers) return _URC_FAILURE; _Unwind_VRS_Pop(context, _UVRSC_CORE, registers, _UVRSD_UINT32); break; } case 0xb2: { uint32_t addend = 0; uint32_t shift = 0; // This decodes a uleb128 value. 
while (true) { if (offset >= len) return _URC_FAILURE; uint32_t v = getByte(data, offset++); addend |= (v & 0x7f) << shift; if ((v & 0x80) == 0) break; shift += 7; } uint32_t sp; _Unwind_VRS_Get(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp); sp += 0x204 + (addend << 2); _Unwind_VRS_Set(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp, NULL); break; } case 0xb3: { uint8_t v = getByte(data, offset++); _Unwind_VRS_Pop(context, _UVRSC_VFP, RegisterRange(static_cast<uint8_t>(v >> 4), v & 0x0f), _UVRSD_VFPX); break; } case 0xb4: case 0xb5: case 0xb6: case 0xb7: return _URC_FAILURE; default: _Unwind_VRS_Pop(context, _UVRSC_VFP, RegisterRange(8, byte & 0x07), _UVRSD_VFPX); break; } break; } case 0xc0: { switch (byte) { #if defined(__ARM_WMMX) case 0xc0: case 0xc1: case 0xc2: case 0xc3: case 0xc4: case 0xc5: _Unwind_VRS_Pop(context, _UVRSC_WMMXD, RegisterRange(10, byte & 0x7), _UVRSD_DOUBLE); break; case 0xc6: { uint8_t v = getByte(data, offset++); uint8_t start = static_cast<uint8_t>(v >> 4); uint8_t count_minus_one = v & 0xf; if (start + count_minus_one >= 16) return _URC_FAILURE; _Unwind_VRS_Pop(context, _UVRSC_WMMXD, RegisterRange(start, count_minus_one), _UVRSD_DOUBLE); break; } case 0xc7: { uint8_t v = getByte(data, offset++); if (!v || v & 0xf0) return _URC_FAILURE; _Unwind_VRS_Pop(context, _UVRSC_WMMXC, v, _UVRSD_DOUBLE); break; } #endif case 0xc8: case 0xc9: { uint8_t v = getByte(data, offset++); uint8_t start = static_cast<uint8_t>(((byte == 0xc8) ? 16 : 0) + (v >> 4)); uint8_t count_minus_one = v & 0xf; if (start + count_minus_one >= 32) return _URC_FAILURE; _Unwind_VRS_Pop(context, _UVRSC_VFP, RegisterRange(start, count_minus_one), _UVRSD_DOUBLE); break; } default: return _URC_FAILURE; } break; } case 0xd0: { if (byte & 0x08) return _URC_FAILURE; _Unwind_VRS_Pop(context, _UVRSC_VFP, RegisterRange(8, byte & 0x7), _UVRSD_DOUBLE); break; } default: return _URC_FAILURE; } } } if (!wrotePC) { uint32_t lr; _Unwind_VRS_Get(context, _UVRSC_CORE, UNW_ARM_LR, _UVRSD_UINT32, &lr); _Unwind_VRS_Set(context, _UVRSC_CORE, UNW_ARM_IP, _UVRSD_UINT32, &lr, NULL); } return _URC_CONTINUE_UNWIND; } extern "C" _LIBUNWIND_EXPORT _Unwind_Reason_Code __aeabi_unwind_cpp_pr0(_Unwind_State state, _Unwind_Control_Block *ucbp, _Unwind_Context *context) { return unwindOneFrame(state, ucbp, context); } extern "C" _LIBUNWIND_EXPORT _Unwind_Reason_Code __aeabi_unwind_cpp_pr1(_Unwind_State state, _Unwind_Control_Block *ucbp, _Unwind_Context *context) { return unwindOneFrame(state, ucbp, context); } extern "C" _LIBUNWIND_EXPORT _Unwind_Reason_Code __aeabi_unwind_cpp_pr2(_Unwind_State state, _Unwind_Control_Block *ucbp, _Unwind_Context *context) { return unwindOneFrame(state, ucbp, context); } static _Unwind_Reason_Code unwind_phase1(unw_context_t *uc, unw_cursor_t *cursor, _Unwind_Exception *exception_object) { // EHABI #7.3 discusses preserving the VRS in a "temporary VRS" during // phase 1 and then restoring it to the "primary VRS" for phase 2. The // effect is phase 2 doesn't see any of the VRS manipulations from phase 1. // In this implementation, the phases don't share the VRS backing store. // Instead, they are passed the original |uc| and they create a new VRS // from scratch thus achieving the same effect. __unw_init_local(cursor, uc); // Walk each frame looking for a place to stop. for (bool handlerNotFound = true; handlerNotFound;) { // See if frame has code to run (has personality routine). 
unw_proc_info_t frameInfo; if (__unw_get_proc_info(cursor, &frameInfo) != UNW_ESUCCESS) { _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): __unw_get_proc_info " "failed => _URC_FATAL_PHASE1_ERROR", static_cast<void *>(exception_object)); return _URC_FATAL_PHASE1_ERROR; } // When tracing, print state information. if (_LIBUNWIND_TRACING_UNWINDING) { char functionBuf[512]; const char *functionName = functionBuf; unw_word_t offset; if ((__unw_get_proc_name(cursor, functionBuf, sizeof(functionBuf), &offset) != UNW_ESUCCESS) || (frameInfo.start_ip + offset > frameInfo.end_ip)) functionName = ".anonymous."; unw_word_t pc; __unw_get_reg(cursor, UNW_REG_IP, &pc); _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): pc=0x%" PRIxPTR ", start_ip=0x%" PRIxPTR ", func=%s, " "lsda=0x%" PRIxPTR ", personality=0x%" PRIxPTR, static_cast<void *>(exception_object), pc, frameInfo.start_ip, functionName, frameInfo.lsda, frameInfo.handler); } // If there is a personality routine, ask it if it will want to stop at // this frame. if (frameInfo.handler != 0) { __personality_routine p = (__personality_routine)(long)(frameInfo.handler); _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): calling personality function %p", static_cast<void *>(exception_object), reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(p))); struct _Unwind_Context *context = (struct _Unwind_Context *)(cursor); exception_object->pr_cache.fnstart = frameInfo.start_ip; exception_object->pr_cache.ehtp = (_Unwind_EHT_Header *)frameInfo.unwind_info; exception_object->pr_cache.additional = frameInfo.flags; _Unwind_Reason_Code personalityResult = (*p)(_US_VIRTUAL_UNWIND_FRAME, exception_object, context); _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): personality result %d start_ip %x ehtp %p " "additional %x", static_cast<void *>(exception_object), personalityResult, exception_object->pr_cache.fnstart, static_cast<void *>(exception_object->pr_cache.ehtp), exception_object->pr_cache.additional); switch (personalityResult) { case _URC_HANDLER_FOUND: // found a catch clause or locals that need destructing in this frame // stop search and remember stack pointer at the frame handlerNotFound = false; // p should have initialized barrier_cache. EHABI #7.3.5 _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): _URC_HANDLER_FOUND", static_cast<void *>(exception_object)); return _URC_NO_REASON; case _URC_CONTINUE_UNWIND: _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): _URC_CONTINUE_UNWIND", static_cast<void *>(exception_object)); // continue unwinding break; // EHABI #7.3.3 case _URC_FAILURE: return _URC_FAILURE; default: // something went wrong _LIBUNWIND_TRACE_UNWINDING( "unwind_phase1(ex_ojb=%p): _URC_FATAL_PHASE1_ERROR", static_cast<void *>(exception_object)); return _URC_FATAL_PHASE1_ERROR; } } } return _URC_NO_REASON; } static _Unwind_Reason_Code unwind_phase2(unw_context_t *uc, unw_cursor_t *cursor, _Unwind_Exception *exception_object, bool resume) { // See comment at the start of unwind_phase1 regarding VRS integrity. __unw_init_local(cursor, uc); _LIBUNWIND_TRACE_UNWINDING("unwind_phase2(ex_ojb=%p)", static_cast<void *>(exception_object)); int frame_count = 0; // Walk each frame until we reach where search phase said to stop. while (true) { // Ask libunwind to get next frame (skip over first which is // _Unwind_RaiseException or _Unwind_Resume). // // Resume only ever makes sense for 1 frame. _Unwind_State state = resume ? 
_US_UNWIND_FRAME_RESUME : _US_UNWIND_FRAME_STARTING; if (resume && frame_count == 1) { // On a resume, first unwind the _Unwind_Resume() frame. The next frame // is now the landing pad for the cleanup from a previous execution of // phase2. To continue unwindingly correctly, replace VRS[15] with the // IP of the frame that the previous run of phase2 installed the context // for. After this, continue unwinding as if normal. // // See #7.4.6 for details. __unw_set_reg(cursor, UNW_REG_IP, exception_object->unwinder_cache.reserved2, NULL); resume = false; } // Get info about this frame. unw_word_t sp; unw_proc_info_t frameInfo; __unw_get_reg(cursor, UNW_REG_SP, &sp); if (__unw_get_proc_info(cursor, &frameInfo) != UNW_ESUCCESS) { _LIBUNWIND_TRACE_UNWINDING( "unwind_phase2(ex_ojb=%p): __unw_get_proc_info " "failed => _URC_FATAL_PHASE2_ERROR", static_cast<void *>(exception_object)); return _URC_FATAL_PHASE2_ERROR; } // When tracing, print state information. if (_LIBUNWIND_TRACING_UNWINDING) { char functionBuf[512]; const char *functionName = functionBuf; unw_word_t offset; if ((__unw_get_proc_name(cursor, functionBuf, sizeof(functionBuf), &offset) != UNW_ESUCCESS) || (frameInfo.start_ip + offset > frameInfo.end_ip)) functionName = ".anonymous."; _LIBUNWIND_TRACE_UNWINDING( "unwind_phase2(ex_ojb=%p): start_ip=0x%" PRIxPTR ", func=%s, sp=0x%" PRIxPTR ", " "lsda=0x%" PRIxPTR ", personality=0x%" PRIxPTR "", static_cast<void *>(exception_object), frameInfo.start_ip, functionName, sp, frameInfo.lsda, frameInfo.handler); } // If there is a personality routine, tell it we are unwinding. if (frameInfo.handler != 0) { __personality_routine p = (__personality_routine)(long)(frameInfo.handler); struct _Unwind_Context *context = (struct _Unwind_Context *)(cursor); // EHABI #7.2 exception_object->pr_cache.fnstart = frameInfo.start_ip; exception_object->pr_cache.ehtp = (_Unwind_EHT_Header *)frameInfo.unwind_info; exception_object->pr_cache.additional = frameInfo.flags; _Unwind_Reason_Code personalityResult = (*p)(state, exception_object, context); switch (personalityResult) { case _URC_CONTINUE_UNWIND: // Continue unwinding _LIBUNWIND_TRACE_UNWINDING( "unwind_phase2(ex_ojb=%p): _URC_CONTINUE_UNWIND", static_cast<void *>(exception_object)); // EHABI #7.2 if (sp == exception_object->barrier_cache.sp) { // Phase 1 said we would stop at this frame, but we did not... _LIBUNWIND_ABORT("during phase1 personality function said it would " "stop here, but now in phase2 it did not stop here"); } break; case _URC_INSTALL_CONTEXT: _LIBUNWIND_TRACE_UNWINDING( "unwind_phase2(ex_ojb=%p): _URC_INSTALL_CONTEXT", static_cast<void *>(exception_object)); // Personality routine says to transfer control to landing pad. // We may get control back if landing pad calls _Unwind_Resume(). if (_LIBUNWIND_TRACING_UNWINDING) { unw_word_t pc; __unw_get_reg(cursor, UNW_REG_IP, &pc); __unw_get_reg(cursor, UNW_REG_SP, &sp); _LIBUNWIND_TRACE_UNWINDING("unwind_phase2(ex_ojb=%p): re-entering " "user code with ip=0x%" PRIxPTR ", sp=0x%" PRIxPTR, static_cast<void *>(exception_object), pc, sp); } { // EHABI #7.4.1 says we need to preserve pc for when _Unwind_Resume // is called back, to find this same frame. unw_word_t pc; __unw_get_reg(cursor, UNW_REG_IP, &pc); exception_object->unwinder_cache.reserved2 = (uint32_t)pc; } __unw_resume(cursor); // __unw_resume() only returns if there was an error. return _URC_FATAL_PHASE2_ERROR; // # EHABI #7.4.3 case _URC_FAILURE: abort(); default: // Personality routine returned an unknown result code. 
_LIBUNWIND_DEBUG_LOG("personality function returned unknown result %d", personalityResult); return _URC_FATAL_PHASE2_ERROR; } } frame_count++; } // Clean up phase did not resume at the frame that the search phase // said it would... return _URC_FATAL_PHASE2_ERROR; } /// Called by __cxa_throw. Only returns if there is a fatal error. _LIBUNWIND_EXPORT _Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Exception *exception_object) { _LIBUNWIND_TRACE_API("_Unwind_RaiseException(ex_obj=%p)", static_cast<void *>(exception_object)); unw_context_t uc; unw_cursor_t cursor; __unw_getcontext(&uc); // This field for is for compatibility with GCC to say this isn't a forced // unwind. EHABI #7.2 exception_object->unwinder_cache.reserved1 = 0; // phase 1: the search phase _Unwind_Reason_Code phase1 = unwind_phase1(&uc, &cursor, exception_object); if (phase1 != _URC_NO_REASON) return phase1; // phase 2: the clean up phase return unwind_phase2(&uc, &cursor, exception_object, false); } _LIBUNWIND_EXPORT void _Unwind_Complete(_Unwind_Exception* exception_object) { // This is to be called when exception handling completes to give us a chance // to perform any housekeeping. EHABI #7.2. But we have nothing to do here. (void)exception_object; } /// When _Unwind_RaiseException() is in phase2, it hands control /// to the personality function at each frame. The personality /// may force a jump to a landing pad in that function, the landing /// pad code may then call _Unwind_Resume() to continue with the /// unwinding. Note: the call to _Unwind_Resume() is from compiler /// generated user code. All other _Unwind_* routines are called /// by the C++ runtime __cxa_* routines. /// /// Note: re-throwing an exception (as opposed to continuing the unwind) /// is implemented by having the code call __cxa_rethrow() which /// in turn calls _Unwind_Resume_or_Rethrow(). _LIBUNWIND_EXPORT void _Unwind_Resume(_Unwind_Exception *exception_object) { _LIBUNWIND_TRACE_API("_Unwind_Resume(ex_obj=%p)", static_cast<void *>(exception_object)); unw_context_t uc; unw_cursor_t cursor; __unw_getcontext(&uc); // _Unwind_RaiseException on EHABI will always set the reserved1 field to 0, // which is in the same position as private_1 below. // TODO(ajwong): Who wronte the above? Why is it true? unwind_phase2(&uc, &cursor, exception_object, true); // Clients assume _Unwind_Resume() does not return, so all we can do is abort. _LIBUNWIND_ABORT("_Unwind_Resume() can't return"); } /// Called by personality handler during phase 2 to get LSDA for current frame. 
_LIBUNWIND_EXPORT uintptr_t _Unwind_GetLanguageSpecificData(struct _Unwind_Context *context) { unw_cursor_t *cursor = (unw_cursor_t *)context; unw_proc_info_t frameInfo; uintptr_t result = 0; if (__unw_get_proc_info(cursor, &frameInfo) == UNW_ESUCCESS) result = (uintptr_t)frameInfo.lsda; _LIBUNWIND_TRACE_API( "_Unwind_GetLanguageSpecificData(context=%p) => 0x%llx", static_cast<void *>(context), (long long)result); return result; } static uint64_t ValueAsBitPattern(_Unwind_VRS_DataRepresentation representation, void* valuep) { uint64_t value = 0; switch (representation) { case _UVRSD_UINT32: case _UVRSD_FLOAT: memcpy(&value, valuep, sizeof(uint32_t)); break; case _UVRSD_VFPX: case _UVRSD_UINT64: case _UVRSD_DOUBLE: memcpy(&value, valuep, sizeof(uint64_t)); break; } return value; } _LIBUNWIND_EXPORT _Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *context, _Unwind_VRS_RegClass regclass, uint32_t regno, _Unwind_VRS_DataRepresentation representation, void *valuep, unw_word_t *pos) { _LIBUNWIND_TRACE_API("_Unwind_VRS_Set(context=%p, regclass=%d, reg=%d, " "rep=%d, value=0x%llX)", static_cast<void *>(context), regclass, regno, representation, ValueAsBitPattern(representation, valuep)); unw_cursor_t *cursor = (unw_cursor_t *)context; switch (regclass) { case _UVRSC_CORE: if (representation != _UVRSD_UINT32 || regno > 15) return _UVRSR_FAILED; return __unw_set_reg(cursor, (unw_regnum_t)(UNW_ARM_R0 + regno), *(unw_word_t *)valuep,(unw_word_t *)pos) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; case _UVRSC_VFP: if (representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE) return _UVRSR_FAILED; if (representation == _UVRSD_VFPX) { // Can only touch d0-15 with FSTMFDX. if (regno > 15) return _UVRSR_FAILED; __unw_save_vfp_as_X(cursor); } else { if (regno > 31) return _UVRSR_FAILED; } return __unw_set_fpreg(cursor, (unw_regnum_t)(UNW_ARM_D0 + regno), *(unw_fpreg_t *)valuep) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; #if defined(__ARM_WMMX) case _UVRSC_WMMXC: if (representation != _UVRSD_UINT32 || regno > 3) return _UVRSR_FAILED; return __unw_set_reg(cursor, (unw_regnum_t)(UNW_ARM_WC0 + regno), *(unw_word_t *)valuep) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; case _UVRSC_WMMXD: if (representation != _UVRSD_DOUBLE || regno > 31) return _UVRSR_FAILED; return __unw_set_fpreg(cursor, (unw_regnum_t)(UNW_ARM_WR0 + regno), *(unw_fpreg_t *)valuep) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; #else case _UVRSC_WMMXC: case _UVRSC_WMMXD: break; #endif } _LIBUNWIND_ABORT("unsupported register class"); } static _Unwind_VRS_Result _Unwind_VRS_Get_Internal(_Unwind_Context *context, _Unwind_VRS_RegClass regclass, uint32_t regno, _Unwind_VRS_DataRepresentation representation, void *valuep) { unw_cursor_t *cursor = (unw_cursor_t *)context; switch (regclass) { case _UVRSC_CORE: if (representation != _UVRSD_UINT32 || regno > 15) return _UVRSR_FAILED; return __unw_get_reg(cursor, (unw_regnum_t)(UNW_ARM_R0 + regno), (unw_word_t *)valuep) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; case _UVRSC_VFP: if (representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE) return _UVRSR_FAILED; if (representation == _UVRSD_VFPX) { // Can only touch d0-15 with FSTMFDX. if (regno > 15) return _UVRSR_FAILED; __unw_save_vfp_as_X(cursor); } else { if (regno > 31) return _UVRSR_FAILED; } return __unw_get_fpreg(cursor, (unw_regnum_t)(UNW_ARM_D0 + regno), (unw_fpreg_t *)valuep) == UNW_ESUCCESS ? 
_UVRSR_OK : _UVRSR_FAILED; #if defined(__ARM_WMMX) case _UVRSC_WMMXC: if (representation != _UVRSD_UINT32 || regno > 3) return _UVRSR_FAILED; return __unw_get_reg(cursor, (unw_regnum_t)(UNW_ARM_WC0 + regno), (unw_word_t *)valuep) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; case _UVRSC_WMMXD: if (representation != _UVRSD_DOUBLE || regno > 31) return _UVRSR_FAILED; return __unw_get_fpreg(cursor, (unw_regnum_t)(UNW_ARM_WR0 + regno), (unw_fpreg_t *)valuep) == UNW_ESUCCESS ? _UVRSR_OK : _UVRSR_FAILED; #else case _UVRSC_WMMXC: case _UVRSC_WMMXD: break; #endif } _LIBUNWIND_ABORT("unsupported register class"); } _LIBUNWIND_EXPORT _Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *context, _Unwind_VRS_RegClass regclass, uint32_t regno, _Unwind_VRS_DataRepresentation representation, void *valuep) { _Unwind_VRS_Result result = _Unwind_VRS_Get_Internal(context, regclass, regno, representation, valuep); _LIBUNWIND_TRACE_API("_Unwind_VRS_Get(context=%p, regclass=%d, reg=%d, " "rep=%d, value=0x%llX, result = %d)", static_cast<void *>(context), regclass, regno, representation, ValueAsBitPattern(representation, valuep), result); return result; } _Unwind_VRS_Result _Unwind_VRS_Pop(_Unwind_Context *context, _Unwind_VRS_RegClass regclass, uint32_t discriminator, _Unwind_VRS_DataRepresentation representation) { _LIBUNWIND_TRACE_API("_Unwind_VRS_Pop(context=%p, regclass=%d, " "discriminator=%d, representation=%d)", static_cast<void *>(context), regclass, discriminator, representation); switch (regclass) { case _UVRSC_WMMXC: #if !defined(__ARM_WMMX) break; #endif case _UVRSC_CORE: { if (representation != _UVRSD_UINT32) return _UVRSR_FAILED; // When popping SP from the stack, we don't want to override it from the // computed new stack location. See EHABI #7.5.4 table 3. bool poppedSP = false; uint32_t* sp; uint32_t* pos; if (_Unwind_VRS_Get(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp) != _UVRSR_OK) { return _UVRSR_FAILED; } for (uint32_t i = 0; i < 16; ++i) { if (!(discriminator & static_cast<uint32_t>(1 << i))) continue; pos = sp; uint32_t value = *sp++; if (regclass == _UVRSC_CORE && i == 13) poppedSP = true; if (_Unwind_VRS_Set(context, regclass, i, _UVRSD_UINT32, &value, pos) != _UVRSR_OK) { return _UVRSR_FAILED; } } if (!poppedSP) { return _Unwind_VRS_Set(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp, NULL); } return _UVRSR_OK; } case _UVRSC_WMMXD: #if !defined(__ARM_WMMX) break; #endif case _UVRSC_VFP: { if (representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE) return _UVRSR_FAILED; uint32_t first = discriminator >> 16; uint32_t count = discriminator & 0xffff; uint32_t end = first+count; uint32_t* sp; if (_Unwind_VRS_Get(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp) != _UVRSR_OK) { return _UVRSR_FAILED; } // For _UVRSD_VFPX, we're assuming the data is stored in FSTMX "standard // format 1", which is equivalent to FSTMD + a padding word. for (uint32_t i = first; i < end; ++i) { // SP is only 32-bit aligned so don't copy 64-bit at a time. uint64_t value = *sp++; value |= ((uint64_t)(*sp++)) << 32; if (_Unwind_VRS_Set(context, regclass, i, representation, &value, NULL) != _UVRSR_OK) return _UVRSR_FAILED; } if (representation == _UVRSD_VFPX) ++sp; return _Unwind_VRS_Set(context, _UVRSC_CORE, UNW_ARM_SP, _UVRSD_UINT32, &sp, NULL); } } _LIBUNWIND_ABORT("unsupported register class"); } /// Called by personality handler during phase 2 to find the start of the /// function. 
_LIBUNWIND_EXPORT uintptr_t _Unwind_GetRegionStart(struct _Unwind_Context *context) { unw_cursor_t *cursor = (unw_cursor_t *)context; unw_proc_info_t frameInfo; uintptr_t result = 0; if (__unw_get_proc_info(cursor, &frameInfo) == UNW_ESUCCESS) result = (uintptr_t)frameInfo.start_ip; _LIBUNWIND_TRACE_API("_Unwind_GetRegionStart(context=%p) => 0x%llX", static_cast<void *>(context), (long long)result); return result; } /// Called by personality handler during phase 2 if a foreign exception // is caught. _LIBUNWIND_EXPORT void _Unwind_DeleteException(_Unwind_Exception *exception_object) { _LIBUNWIND_TRACE_API("_Unwind_DeleteException(ex_obj=%p)", static_cast<void *>(exception_object)); if (exception_object->exception_cleanup != NULL) (*exception_object->exception_cleanup)(_URC_FOREIGN_EXCEPTION_CAUGHT, exception_object); } extern "C" _LIBUNWIND_EXPORT _Unwind_Reason_Code __gnu_unwind_frame(_Unwind_Exception *exception_object, struct _Unwind_Context *context) { unw_cursor_t *cursor = (unw_cursor_t *)context; if (__unw_step(cursor) != UNW_STEP_SUCCESS) return _URC_FAILURE; return _URC_OK; } #endif // defined(_LIBUNWIND_ARM_EHABI)
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/debug/di/rsassembly.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // File: RsAssembly.cpp // // //***************************************************************************** #include "stdafx.h" #include "primitives.h" #include "safewrap.h" #include "check.h" #include <tlhelp32.h> #include "wtsapi32.h" #ifndef SM_REMOTESESSION #define SM_REMOTESESSION 0x1000 #endif #include "corpriv.h" #include "../../dlls/mscorrc/resource.h" #include <limits.h> /* ------------------------------------------------------------------------- * * Assembly class * ------------------------------------------------------------------------- */ CordbAssembly::CordbAssembly(CordbAppDomain * pAppDomain, VMPTR_Assembly vmAssembly, VMPTR_DomainAssembly vmDomainAssembly) : CordbBase(pAppDomain->GetProcess(), vmDomainAssembly.IsNull() ? VmPtrToCookie(vmAssembly) : VmPtrToCookie(vmDomainAssembly), enumCordbAssembly), m_vmAssembly(vmAssembly), m_vmDomainAssembly(vmDomainAssembly), m_pAppDomain(pAppDomain) { _ASSERTE(!vmAssembly.IsNull()); } /* A list of which resources owned by this object are accounted for. public: CordbAppDomain *m_pAppDomain; // Assigned w/o addRef(), Deleted in ~CordbAssembly */ CordbAssembly::~CordbAssembly() { } HRESULT CordbAssembly::QueryInterface(REFIID id, void **ppInterface) { if (id == IID_ICorDebugAssembly) *ppInterface = static_cast<ICorDebugAssembly*>(this); else if (id == IID_ICorDebugAssembly2) *ppInterface = static_cast<ICorDebugAssembly2*>(this); else if (id == IID_IUnknown) *ppInterface = static_cast<IUnknown*>( static_cast<ICorDebugAssembly*>(this) ); else { *ppInterface = NULL; return E_NOINTERFACE; } ExternalAddRef(); return S_OK; } // Neutered by AppDomain void CordbAssembly::Neuter() { m_pAppDomain = NULL; CordbBase::Neuter(); } #ifdef _DEBUG //--------------------------------------------------------------------------------------- // Callback helper for code:CordbAssembly::DbgAssertAssemblyDeleted // // Arguments // vmDomainAssembly - domain file in the enumeration // pUserData - pointer to the CordbAssembly that we just got an exit event for. // // static void CordbAssembly::DbgAssertAssemblyDeletedCallback(VMPTR_DomainAssembly vmDomainAssembly, void * pUserData) { CordbAssembly * pThis = reinterpret_cast<CordbAssembly * >(pUserData); INTERNAL_DAC_CALLBACK(pThis->GetProcess()); VMPTR_DomainAssembly vmAssemblyDeleted = pThis->m_vmDomainAssembly; CONSISTENCY_CHECK_MSGF((vmAssemblyDeleted != vmDomainAssembly), ("An Assembly Unload event was sent, but the assembly still shows up in the enumeration.\n vmAssemblyDeleted=%p\n", VmPtrToCookie(vmAssemblyDeleted))); } //--------------------------------------------------------------------------------------- // Assert that a assembly is no longer discoverable via enumeration. // // Notes: // See code:IDacDbiInterface#Enumeration for rules that we're asserting. // This is a debug only method. It's conceptually similar to // code:CordbProcess::DbgAssertAppDomainDeleted. 
// void CordbAssembly::DbgAssertAssemblyDeleted() { GetProcess()->GetDAC()->EnumerateAssembliesInAppDomain( GetAppDomain()->GetADToken(), CordbAssembly::DbgAssertAssemblyDeletedCallback, this); } #endif // _DEBUG /* * GetProcess returns the process containing the assembly */ HRESULT CordbAssembly::GetProcess(ICorDebugProcess **ppProcess) { PUBLIC_API_ENTRY(this); FAIL_IF_NEUTERED(this); VALIDATE_POINTER_TO_OBJECT(ppProcess, ICorDebugProcess **); return (m_pAppDomain->GetProcess (ppProcess)); } // // Returns the AppDomain that this assembly belongs to. // // Arguments: // ppAppDomain - a non-NULL pointer to store the AppDomain in. // // Return Value: // S_OK // // Notes: // On the debugger right-side we currently consider every assembly to belong // to a single AppDomain, and create multiple CordbAssembly instances (one // per AppDomain) to represent domain-neutral assemblies. // HRESULT CordbAssembly::GetAppDomain(ICorDebugAppDomain **ppAppDomain) { PUBLIC_API_ENTRY(this); FAIL_IF_NEUTERED(this); VALIDATE_POINTER_TO_OBJECT(ppAppDomain, ICorDebugAppDomain **); _ASSERTE(m_pAppDomain != NULL); *ppAppDomain = static_cast<ICorDebugAppDomain *> (m_pAppDomain); m_pAppDomain->ExternalAddRef(); return S_OK; } /* * EnumerateModules enumerates all modules in the assembly */ HRESULT CordbAssembly::EnumerateModules(ICorDebugModuleEnum **ppModules) { HRESULT hr = S_OK; PUBLIC_API_BEGIN(this); { ValidateOrThrow(ppModules); *ppModules = NULL; m_pAppDomain->PrepopulateModules(); RSInitHolder<CordbEnumFilter> pModEnum( new CordbEnumFilter(GetProcess(), GetProcess()->GetContinueNeuterList())); RSInitHolder<CordbHashTableEnum> pEnum; CordbHashTableEnum::BuildOrThrow( this, NULL, // ownership &m_pAppDomain->m_modules, IID_ICorDebugModuleEnum, pEnum.GetAddr()); // this will build up an auxillary list. Don't need pEnum after this. hr = pModEnum->Init(pEnum, this); IfFailThrow(hr); pModEnum.TransferOwnershipExternal(ppModules); } PUBLIC_API_END(hr); return hr; } /* * GetCodeBase returns the code base used to load the assembly */ HRESULT CordbAssembly::GetCodeBase(ULONG32 cchName, ULONG32 *pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[]) { PUBLIC_API_ENTRY(this); FAIL_IF_NEUTERED(this); VALIDATE_POINTER_TO_OBJECT_ARRAY(szName, WCHAR, cchName, true, true); VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcchName, ULONG32 *); return E_NOTIMPL; } // // Gets the filename of the assembly // // Arguments: // cchName - number of characters available in szName, or 0 to query length // pcchName - optional pointer to store the real length of the filename // szName - buffer in which to copy the filename, or NULL if cchName is 0. // // Return value: // S_OK on success (even if there is no filename). // An error code if the filename could not be read for the assembly. This should // not happen unless the target is corrupt. // // Notes: // In-memory assemblies do not have a filename. In that case, for compatibility // this returns success and the string "<unknown>". We may want to change this // behavior in the future. // HRESULT CordbAssembly::GetName(ULONG32 cchName, ULONG32 *pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[]) { PUBLIC_API_ENTRY(this); FAIL_IF_NEUTERED(this); VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(szName, WCHAR, cchName, true, true); VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcchName, ULONG32 *); HRESULT hr = S_OK; EX_TRY { // Lazily initialize our cache of the assembly filename. // Note that if this fails, we'll try again next time this is called. 
// This can be convenient for transient errors and debugging purposes, but could cause a // performance problem if failure was common (it should not be). if (!m_strAssemblyFileName.IsSet()) { IDacDbiInterface * pDac = m_pProcess->GetDAC(); // throws BOOL fNonEmpty = pDac->GetAssemblyPath(m_vmAssembly, &m_strAssemblyFileName); // throws _ASSERTE(m_strAssemblyFileName.IsSet()); if (!fNonEmpty) { // File name is empty (eg. for an in-memory assembly) _ASSERTE(m_strAssemblyFileName.IsEmpty()); // Construct a fake name // This seems unwise - the assembly doesn't have a filename, we should probably just return // an empty string and S_FALSE. This is a common case (in-memory assemblies), I don't see any reason to // fake up a filename to pretend that it has a disk location when it doesn't. // But I don't want to break tests at the moment that expect this. // Note that all assemblies have a simple metadata name - perhaps we should have an additional API for that. m_strAssemblyFileName.AssignCopy(W("<unknown>")); } } // We should now have a non-empty string _ASSERTE(m_strAssemblyFileName.IsSet()); _ASSERTE(!m_strAssemblyFileName.IsEmpty()); // Copy it out to our caller } EX_CATCH_HRESULT(hr); if (FAILED(hr)) { return hr; } return CopyOutString(m_strAssemblyFileName, cchName, pcchName, szName); } HRESULT CordbAssembly::IsFullyTrusted( BOOL *pbFullyTrusted ) { PUBLIC_API_ENTRY(this); FAIL_IF_NEUTERED(this); ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess()); VALIDATE_POINTER_TO_OBJECT(pbFullyTrusted, BOOL*); if (m_vmDomainAssembly.IsNull()) return E_UNEXPECTED; // Check for cached result if( m_foptIsFullTrust.HasValue() ) { *pbFullyTrusted = m_foptIsFullTrust.GetValue(); return S_OK; } HRESULT hr = S_OK; EX_TRY { CordbProcess * pProcess = m_pAppDomain->GetProcess(); IDacDbiInterface * pDac = pProcess->GetDAC(); BOOL fIsFullTrust = pDac->IsAssemblyFullyTrusted(m_vmDomainAssembly); // Once the trust level of an assembly is known, it cannot change. m_foptIsFullTrust = fIsFullTrust; *pbFullyTrusted = fIsFullTrust; } EX_CATCH_HRESULT(hr); return hr; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // File: RsAssembly.cpp // // //***************************************************************************** #include "stdafx.h" #include "primitives.h" #include "safewrap.h" #include "check.h" #include <tlhelp32.h> #include "wtsapi32.h" #ifndef SM_REMOTESESSION #define SM_REMOTESESSION 0x1000 #endif #include "corpriv.h" #include "../../dlls/mscorrc/resource.h" #include <limits.h> /* ------------------------------------------------------------------------- * * Assembly class * ------------------------------------------------------------------------- */ CordbAssembly::CordbAssembly(CordbAppDomain * pAppDomain, VMPTR_Assembly vmAssembly, VMPTR_DomainAssembly vmDomainAssembly) : CordbBase(pAppDomain->GetProcess(), vmDomainAssembly.IsNull() ? VmPtrToCookie(vmAssembly) : VmPtrToCookie(vmDomainAssembly), enumCordbAssembly), m_vmAssembly(vmAssembly), m_vmDomainAssembly(vmDomainAssembly), m_pAppDomain(pAppDomain) { _ASSERTE(!vmAssembly.IsNull()); } /* A list of which resources owned by this object are accounted for. public: CordbAppDomain *m_pAppDomain; // Assigned w/o addRef(), Deleted in ~CordbAssembly */ CordbAssembly::~CordbAssembly() { } HRESULT CordbAssembly::QueryInterface(REFIID id, void **ppInterface) { if (id == IID_ICorDebugAssembly) *ppInterface = static_cast<ICorDebugAssembly*>(this); else if (id == IID_ICorDebugAssembly2) *ppInterface = static_cast<ICorDebugAssembly2*>(this); else if (id == IID_IUnknown) *ppInterface = static_cast<IUnknown*>( static_cast<ICorDebugAssembly*>(this) ); else { *ppInterface = NULL; return E_NOINTERFACE; } ExternalAddRef(); return S_OK; } // Neutered by AppDomain void CordbAssembly::Neuter() { m_pAppDomain = NULL; CordbBase::Neuter(); } #ifdef _DEBUG //--------------------------------------------------------------------------------------- // Callback helper for code:CordbAssembly::DbgAssertAssemblyDeleted // // Arguments // vmDomainAssembly - domain file in the enumeration // pUserData - pointer to the CordbAssembly that we just got an exit event for. // // static void CordbAssembly::DbgAssertAssemblyDeletedCallback(VMPTR_DomainAssembly vmDomainAssembly, void * pUserData) { CordbAssembly * pThis = reinterpret_cast<CordbAssembly * >(pUserData); INTERNAL_DAC_CALLBACK(pThis->GetProcess()); VMPTR_DomainAssembly vmAssemblyDeleted = pThis->m_vmDomainAssembly; CONSISTENCY_CHECK_MSGF((vmAssemblyDeleted != vmDomainAssembly), ("An Assembly Unload event was sent, but the assembly still shows up in the enumeration.\n vmAssemblyDeleted=%p\n", VmPtrToCookie(vmAssemblyDeleted))); } //--------------------------------------------------------------------------------------- // Assert that a assembly is no longer discoverable via enumeration. // // Notes: // See code:IDacDbiInterface#Enumeration for rules that we're asserting. // This is a debug only method. It's conceptually similar to // code:CordbProcess::DbgAssertAppDomainDeleted. 
// void CordbAssembly::DbgAssertAssemblyDeleted() { GetProcess()->GetDAC()->EnumerateAssembliesInAppDomain( GetAppDomain()->GetADToken(), CordbAssembly::DbgAssertAssemblyDeletedCallback, this); } #endif // _DEBUG /* * GetProcess returns the process containing the assembly */ HRESULT CordbAssembly::GetProcess(ICorDebugProcess **ppProcess) { PUBLIC_API_ENTRY(this); FAIL_IF_NEUTERED(this); VALIDATE_POINTER_TO_OBJECT(ppProcess, ICorDebugProcess **); return (m_pAppDomain->GetProcess (ppProcess)); } // // Returns the AppDomain that this assembly belongs to. // // Arguments: // ppAppDomain - a non-NULL pointer to store the AppDomain in. // // Return Value: // S_OK // // Notes: // On the debugger right-side we currently consider every assembly to belong // to a single AppDomain, and create multiple CordbAssembly instances (one // per AppDomain) to represent domain-neutral assemblies. // HRESULT CordbAssembly::GetAppDomain(ICorDebugAppDomain **ppAppDomain) { PUBLIC_API_ENTRY(this); FAIL_IF_NEUTERED(this); VALIDATE_POINTER_TO_OBJECT(ppAppDomain, ICorDebugAppDomain **); _ASSERTE(m_pAppDomain != NULL); *ppAppDomain = static_cast<ICorDebugAppDomain *> (m_pAppDomain); m_pAppDomain->ExternalAddRef(); return S_OK; } /* * EnumerateModules enumerates all modules in the assembly */ HRESULT CordbAssembly::EnumerateModules(ICorDebugModuleEnum **ppModules) { HRESULT hr = S_OK; PUBLIC_API_BEGIN(this); { ValidateOrThrow(ppModules); *ppModules = NULL; m_pAppDomain->PrepopulateModules(); RSInitHolder<CordbEnumFilter> pModEnum( new CordbEnumFilter(GetProcess(), GetProcess()->GetContinueNeuterList())); RSInitHolder<CordbHashTableEnum> pEnum; CordbHashTableEnum::BuildOrThrow( this, NULL, // ownership &m_pAppDomain->m_modules, IID_ICorDebugModuleEnum, pEnum.GetAddr()); // this will build up an auxillary list. Don't need pEnum after this. hr = pModEnum->Init(pEnum, this); IfFailThrow(hr); pModEnum.TransferOwnershipExternal(ppModules); } PUBLIC_API_END(hr); return hr; } /* * GetCodeBase returns the code base used to load the assembly */ HRESULT CordbAssembly::GetCodeBase(ULONG32 cchName, ULONG32 *pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[]) { PUBLIC_API_ENTRY(this); FAIL_IF_NEUTERED(this); VALIDATE_POINTER_TO_OBJECT_ARRAY(szName, WCHAR, cchName, true, true); VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcchName, ULONG32 *); return E_NOTIMPL; } // // Gets the filename of the assembly // // Arguments: // cchName - number of characters available in szName, or 0 to query length // pcchName - optional pointer to store the real length of the filename // szName - buffer in which to copy the filename, or NULL if cchName is 0. // // Return value: // S_OK on success (even if there is no filename). // An error code if the filename could not be read for the assembly. This should // not happen unless the target is corrupt. // // Notes: // In-memory assemblies do not have a filename. In that case, for compatibility // this returns success and the string "<unknown>". We may want to change this // behavior in the future. // HRESULT CordbAssembly::GetName(ULONG32 cchName, ULONG32 *pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[]) { PUBLIC_API_ENTRY(this); FAIL_IF_NEUTERED(this); VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(szName, WCHAR, cchName, true, true); VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcchName, ULONG32 *); HRESULT hr = S_OK; EX_TRY { // Lazily initialize our cache of the assembly filename. // Note that if this fails, we'll try again next time this is called. 
// This can be convenient for transient errors and debugging purposes, but could cause a // performance problem if failure was common (it should not be). if (!m_strAssemblyFileName.IsSet()) { IDacDbiInterface * pDac = m_pProcess->GetDAC(); // throws BOOL fNonEmpty = pDac->GetAssemblyPath(m_vmAssembly, &m_strAssemblyFileName); // throws _ASSERTE(m_strAssemblyFileName.IsSet()); if (!fNonEmpty) { // File name is empty (eg. for an in-memory assembly) _ASSERTE(m_strAssemblyFileName.IsEmpty()); // Construct a fake name // This seems unwise - the assembly doesn't have a filename, we should probably just return // an empty string and S_FALSE. This is a common case (in-memory assemblies), I don't see any reason to // fake up a filename to pretend that it has a disk location when it doesn't. // But I don't want to break tests at the moment that expect this. // Note that all assemblies have a simple metadata name - perhaps we should have an additional API for that. m_strAssemblyFileName.AssignCopy(W("<unknown>")); } } // We should now have a non-empty string _ASSERTE(m_strAssemblyFileName.IsSet()); _ASSERTE(!m_strAssemblyFileName.IsEmpty()); // Copy it out to our caller } EX_CATCH_HRESULT(hr); if (FAILED(hr)) { return hr; } return CopyOutString(m_strAssemblyFileName, cchName, pcchName, szName); } HRESULT CordbAssembly::IsFullyTrusted( BOOL *pbFullyTrusted ) { PUBLIC_API_ENTRY(this); FAIL_IF_NEUTERED(this); ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess()); VALIDATE_POINTER_TO_OBJECT(pbFullyTrusted, BOOL*); if (m_vmDomainAssembly.IsNull()) return E_UNEXPECTED; // Check for cached result if( m_foptIsFullTrust.HasValue() ) { *pbFullyTrusted = m_foptIsFullTrust.GetValue(); return S_OK; } HRESULT hr = S_OK; EX_TRY { CordbProcess * pProcess = m_pAppDomain->GetProcess(); IDacDbiInterface * pDac = pProcess->GetDAC(); BOOL fIsFullTrust = pDac->IsAssemblyFullyTrusted(m_vmDomainAssembly); // Once the trust level of an assembly is known, it cannot change. m_foptIsFullTrust = fIsFullTrust; *pbFullyTrusted = fIsFullTrust; } EX_CATCH_HRESULT(hr); return hr; }
-1
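For context only, a minimal hypothetical sketch (not taken from the PR record above) of the source-level `AND(x, NEG(x))` pattern that dotnet/runtime#66193 lowers to the BMI1 `ExtractLowestSetBit` hardware intrinsic (the xarch `blsi` instruction). Whether a given C# form produces the exact tree shape the lowering matches is an assumption; the explicit `Bmi1` intrinsic call is shown only for comparison.

```csharp
// Sketch, assuming the JIT recognizes this AND(x, NEG(x)) shape per the PR above.
using System;
using System.Runtime.Intrinsics.X86;

static class LowestSetBitSketch
{
    // AND(x, NEG(x)) isolates the lowest set bit of x. With the lowering in
    // place, a BMI1-capable JIT may emit `blsi` here instead of the
    // mov/neg/and sequence shown in the PR's asm diff (an assumption about
    // how this particular source form is compiled).
    static uint ViaPattern(uint x) => x & unchecked((uint)-(int)x);

    // Explicit intrinsic form, guarded by IsSupported; Bmi1.ExtractLowestSetBit
    // is an existing .NET API in System.Runtime.Intrinsics.X86.
    static uint ViaIntrinsic(uint x) =>
        Bmi1.IsSupported ? Bmi1.ExtractLowestSetBit(x) : ViaPattern(x);

    static void Main()
    {
        uint x = 0b_1011_0100;              // 180
        Console.WriteLine(ViaPattern(x));   // 4
        Console.WriteLine(ViaIntrinsic(x)); // 4
    }
}
```

Both helpers return the same value; the difference is only whether the `blsi` encoding comes from explicit intrinsic use or from the pattern-based lowering described in the record above.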
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/inc/sha1.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #ifndef SHA1_H_ #define SHA1_H_ // Hasher class, performs no allocation and therefore does not throw or return // errors. Usage is as follows: // Create an instance (this initializes the hash). // Add one or more blocks of input data using AddData(). // Retrieve the hash using GetHash(). This can be done as many times as desired // until the object is destructed. Once a hash is asked for, further AddData // calls will be ignored. There is no way to reset object state (simply // destroy the object and create another instead). #define SHA1_HASH_SIZE 20 // Number of bytes output by SHA-1 typedef struct { DWORD magic_sha1; // Magic value for A_SHA_CTX DWORD awaiting_data[16]; // Data awaiting full 512-bit block. // Length (nbit_total[0] % 512) bits. // Unused part of buffer (at end) is zero DWORD partial_hash[5]; // Hash through last full block DWORD nbit_total[2]; // Total length of message so far // (bits, mod 2^64) } SHA1_CTX; class SHA1Hash { private: SHA1_CTX m_Context; BYTE m_Value[SHA1_HASH_SIZE]; BOOL m_fFinalized; void SHA1Init(SHA1_CTX*); void SHA1Update(SHA1_CTX*, const BYTE*, const DWORD); void SHA1Final(SHA1_CTX*, BYTE* digest); public: SHA1Hash(); void AddData(BYTE *pbData, DWORD cbData); BYTE *GetHash(); }; #endif // SHA1_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #ifndef SHA1_H_ #define SHA1_H_ // Hasher class, performs no allocation and therefore does not throw or return // errors. Usage is as follows: // Create an instance (this initializes the hash). // Add one or more blocks of input data using AddData(). // Retrieve the hash using GetHash(). This can be done as many times as desired // until the object is destructed. Once a hash is asked for, further AddData // calls will be ignored. There is no way to reset object state (simply // destroy the object and create another instead). #define SHA1_HASH_SIZE 20 // Number of bytes output by SHA-1 typedef struct { DWORD magic_sha1; // Magic value for A_SHA_CTX DWORD awaiting_data[16]; // Data awaiting full 512-bit block. // Length (nbit_total[0] % 512) bits. // Unused part of buffer (at end) is zero DWORD partial_hash[5]; // Hash through last full block DWORD nbit_total[2]; // Total length of message so far // (bits, mod 2^64) } SHA1_CTX; class SHA1Hash { private: SHA1_CTX m_Context; BYTE m_Value[SHA1_HASH_SIZE]; BOOL m_fFinalized; void SHA1Init(SHA1_CTX*); void SHA1Update(SHA1_CTX*, const BYTE*, const DWORD); void SHA1Final(SHA1_CTX*, BYTE* digest); public: SHA1Hash(); void AddData(BYTE *pbData, DWORD cbData); BYTE *GetHash(); }; #endif // SHA1_H_
-1
dotnet/runtime
66,193
Add xarch `blsi`
This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
Wraith2
2022-03-04T13:44:46Z
2022-03-15T00:53:39Z
436b97cc809a3db1d1a25faedbc64aa97875bae3
6bf873a991bcae3f80f5de155a594cefc8824eea
Add xarch `blsi`. This adds a lowering for the pattern `AND(x, NEG(x))` to the ExtractLowestSetBit hwintrinsic. The spmi replay is clean and there is only one asm diff: ```diff ; Assembly listing for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; Emitting BLENDED_CODE for X64 CPU with AVX - Windows ; optimized code ; rsp based frame ; partially interruptible ; No matching PGO data ; 0 inlinees with PGO data; 1 single block inlinees; 1 inlinees without PGO data ; Final local variable assignments ; -; V00 arg0 [V00,T00] ( 6, 5.50) int -> rsi single-def +; V00 arg0 [V00,T00] ( 5, 4.50) int -> rsi single-def ;* V01 loc0 [V01 ] ( 0, 0 ) int -> zero-ref ; V02 OutArgs [V02 ] ( 1, 1 ) lclBlk (32) [rsp+00H] "OutgoingArgSpace" ; V03 tmp1 [V03,T02] ( 3, 2 ) int -> rcx ; V04 tmp2 [V04,T01] ( 2, 4 ) bool -> rcx "Inlining Arg" ; V05 cse0 [V05,T03] ( 3, 1.50) ref -> rdx "CSE - moderate" ; ; Lcl frame size = 32 G_M29069_IG01: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, nogc <-- Prolog IG push rsi sub rsp, 32 mov esi, ecx ;; bbWeight=1 PerfScore 1.50 G_M29069_IG02: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 4 je SHORT G_M29069_IG04 ;; bbWeight=1 PerfScore 1.25 G_M29069_IG03: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz cmp esi, 5 sete cl movzx rcx, cl jmp SHORT G_M29069_IG05 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG04: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov ecx, 1 ;; bbWeight=0.50 PerfScore 0.12 G_M29069_IG05: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref, isz movzx rcx, cl test ecx, ecx jne SHORT G_M29069_IG07 ;; bbWeight=1 PerfScore 1.50 G_M29069_IG06: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref mov rcx, 0xD1FFAB1E ; string handle mov rdx, gword ptr [rcx] ; gcrRegs +[rdx] mov rcx, rdx ; gcrRegs +[rcx] call hackishModuleName:hackishMethodName() ; gcrRegs -[rcx rdx] ; gcr arg pop 0 ;; bbWeight=0.50 PerfScore 1.75 G_M29069_IG07: ; gcrefRegs=00000000 {}, byrefRegs=00000000 {}, byref + blsi eax, esi - mov eax, esi - neg eax - and eax, esi shl eax, 28 + ;; bbWeight=1 PerfScore 1.00 - ;; bbWeight=1 PerfScore 1.25 G_M29069_IG08: ; , epilog, nogc, extend add rsp, 32 pop rsi ret ;; bbWeight=1 PerfScore 1.75 +; Total bytes of code 70, prolog size 5, PerfScore 17.63, instruction count 22, allocated bytes for code 70 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int -; Total bytes of code 71, prolog size 5, PerfScore 17.98, instruction count 24, allocated bytes for code 71 (MethodHash=20958e72) for method System.String:GetCompareOptionsFromOrdinalStringComparison(int):int ; ============================================================ Unwind Info: >> Start offset : 0x000000 (not in unwind data) >> End offset : 0xd1ffab1e (not in unwind data) Version : 1 Flags : 0x00 SizeOfProlog : 0x05 CountOfUnwindCodes: 2 FrameRegister : none (0) FrameOffset : N/A (no FrameRegister) (Value=0) UnwindCodes : CodeOffset: 0x05 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 3 * 8 + 8 = 32 = 0x20 CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6) ``` The value is low but if it is ever used it is an improvement. I chose to open the PR even though the value is low so that even if this is closed anyone else ever wonders why `blsi` isn't used can see the results of implementing it. /cc @dotnet/jit-contrib
./src/coreclr/pal/tests/palsuite/c_runtime/swprintf/test17/test17.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test17.c ** ** Purpose:Tests swprintf with compact format doubles (lowercase) ** ** **==========================================================================*/ #include <palsuite.h> #include "../swprintf.h" /* * Uses memcmp & wcslen */ PALTEST(c_runtime_swprintf_test17_paltest_swprintf_test17, "c_runtime/swprintf/test17/paltest_swprintf_test17") { double val = 2560.001; double neg = -2560.001; if (PAL_Initialize(argc, argv) != 0) { return FAIL; } DoDoubleTest(convert("foo %g"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %lg"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %hg"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %Lg"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %I64g"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %5g"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %-5g"), val, convert("foo 2560 "), convert("foo 2560 ")); DoDoubleTest(convert("foo %.1g"), val, convert("foo 3e+003"), convert("foo 3e+03")); DoDoubleTest(convert("foo %.2g"), val, convert("foo 2.6e+003"), convert("foo 2.6e+03")); DoDoubleTest(convert("foo %.12g"), val, convert("foo 2560.001"), convert("foo 2560.001")); DoDoubleTest(convert("foo %06g"), val, convert("foo 002560"), convert("foo 002560")); DoDoubleTest(convert("foo %#g"), val, convert("foo 2560.00"), convert("foo 2560.00")); DoDoubleTest(convert("foo %+g"), val, convert("foo +2560"), convert("foo +2560")); DoDoubleTest(convert("foo % g"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %+g"), neg, convert("foo -2560"), convert("foo -2560")); DoDoubleTest(convert("foo % g"), neg, convert("foo -2560"), convert("foo -2560")); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test17.c ** ** Purpose:Tests swprintf with compact format doubles (lowercase) ** ** **==========================================================================*/ #include <palsuite.h> #include "../swprintf.h" /* * Uses memcmp & wcslen */ PALTEST(c_runtime_swprintf_test17_paltest_swprintf_test17, "c_runtime/swprintf/test17/paltest_swprintf_test17") { double val = 2560.001; double neg = -2560.001; if (PAL_Initialize(argc, argv) != 0) { return FAIL; } DoDoubleTest(convert("foo %g"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %lg"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %hg"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %Lg"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %I64g"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %5g"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %-5g"), val, convert("foo 2560 "), convert("foo 2560 ")); DoDoubleTest(convert("foo %.1g"), val, convert("foo 3e+003"), convert("foo 3e+03")); DoDoubleTest(convert("foo %.2g"), val, convert("foo 2.6e+003"), convert("foo 2.6e+03")); DoDoubleTest(convert("foo %.12g"), val, convert("foo 2560.001"), convert("foo 2560.001")); DoDoubleTest(convert("foo %06g"), val, convert("foo 002560"), convert("foo 002560")); DoDoubleTest(convert("foo %#g"), val, convert("foo 2560.00"), convert("foo 2560.00")); DoDoubleTest(convert("foo %+g"), val, convert("foo +2560"), convert("foo +2560")); DoDoubleTest(convert("foo % g"), val, convert("foo 2560"), convert("foo 2560")); DoDoubleTest(convert("foo %+g"), neg, convert("foo -2560"), convert("foo -2560")); DoDoubleTest(convert("foo % g"), neg, convert("foo -2560"), convert("foo -2560")); PAL_Terminate(); return PASS; }
-1